Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/kernel/module.c34
-rw-r--r--arch/arm/Kconfig63
-rw-r--r--arch/arm/Makefile4
-rw-r--r--arch/arm/boot/compressed/Makefile10
-rw-r--r--arch/arm/boot/compressed/head-shmobile.S12
-rw-r--r--arch/arm/boot/compressed/head.S3
-rw-r--r--arch/arm/boot/compressed/mmcif-sh7372.c2
-rw-r--r--arch/arm/boot/compressed/sdhi-sh7372.c95
-rw-r--r--arch/arm/boot/compressed/sdhi-shmobile.c449
-rw-r--r--arch/arm/boot/compressed/sdhi-shmobile.h11
-rw-r--r--arch/arm/boot/compressed/vmlinux.lds.in12
-rw-r--r--arch/arm/common/dmabounce.c193
-rw-r--r--arch/arm/common/gic.c7
-rw-r--r--arch/arm/common/it8152.c16
-rw-r--r--arch/arm/common/sa1111.c60
-rw-r--r--arch/arm/configs/cm_x300_defconfig18
-rw-r--r--arch/arm/configs/loki_defconfig120
-rw-r--r--arch/arm/configs/mx51_defconfig3
-rw-r--r--arch/arm/configs/mxs_defconfig2
-rw-r--r--arch/arm/include/asm/assembler.h9
-rw-r--r--arch/arm/include/asm/bitops.h4
-rw-r--r--arch/arm/include/asm/dma-mapping.h88
-rw-r--r--arch/arm/include/asm/dma.h11
-rw-r--r--arch/arm/include/asm/entry-macro-multi.S14
-rw-r--r--arch/arm/include/asm/hardware/scoop.h1
-rw-r--r--arch/arm/include/asm/hwcap.h36
-rw-r--r--arch/arm/include/asm/kprobes.h28
-rw-r--r--arch/arm/include/asm/mach/arch.h4
-rw-r--r--arch/arm/include/asm/memory.h12
-rw-r--r--arch/arm/include/asm/perf_event.h2
-rw-r--r--arch/arm/include/asm/pmu.h2
-rw-r--r--arch/arm/include/asm/proc-fns.h14
-rw-r--r--arch/arm/include/asm/ptrace.h11
-rw-r--r--arch/arm/include/asm/scatterlist.h4
-rw-r--r--arch/arm/include/asm/setup.h8
-rw-r--r--arch/arm/include/asm/suspend.h22
-rw-r--r--arch/arm/include/asm/tcm.h2
-rw-r--r--arch/arm/include/asm/tlbflush.h58
-rw-r--r--arch/arm/include/asm/traps.h3
-rw-r--r--arch/arm/kernel/Makefile7
-rw-r--r--arch/arm/kernel/asm-offsets.c3
-rw-r--r--arch/arm/kernel/entry-armv.S523
-rw-r--r--arch/arm/kernel/entry-header.S31
-rw-r--r--arch/arm/kernel/head-nommu.S8
-rw-r--r--arch/arm/kernel/head.S8
-rw-r--r--arch/arm/kernel/hw_breakpoint.c12
-rw-r--r--arch/arm/kernel/irq.c51
-rw-r--r--arch/arm/kernel/kprobes-arm.c999
-rw-r--r--arch/arm/kernel/kprobes-common.c577
-rw-r--r--arch/arm/kernel/kprobes-decode.c1670
-rw-r--r--arch/arm/kernel/kprobes-thumb.c1462
-rw-r--r--arch/arm/kernel/kprobes.c222
-rw-r--r--arch/arm/kernel/kprobes.h420
-rw-r--r--arch/arm/kernel/module.c29
-rw-r--r--arch/arm/kernel/perf_event.c10
-rw-r--r--arch/arm/kernel/perf_event_v7.c344
-rw-r--r--arch/arm/kernel/pmu.c87
-rw-r--r--arch/arm/kernel/ptrace.c28
-rw-r--r--arch/arm/kernel/setup.c109
-rw-r--r--arch/arm/kernel/sleep.S84
-rw-r--r--arch/arm/kernel/smp.c11
-rw-r--r--arch/arm/kernel/smp_scu.c2
-rw-r--r--arch/arm/kernel/tcm.c68
-rw-r--r--arch/arm/kernel/traps.c17
-rw-r--r--arch/arm/kernel/vmlinux.lds.S126
-rw-r--r--arch/arm/mach-bcmring/include/mach/entry-macro.S4
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c1
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c19
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c1
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c1
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c1
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c1
-rw-r--r--arch/arm/mach-davinci/board-tnetv107x-evm.c1
-rw-r--r--arch/arm/mach-davinci/clock.c38
-rw-r--r--arch/arm/mach-davinci/clock.h2
-rw-r--r--arch/arm/mach-davinci/dm646x.c4
-rw-r--r--arch/arm/mach-davinci/include/mach/dm646x.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/entry-macro.S3
-rw-r--r--arch/arm/mach-davinci/include/mach/memory.h7
-rw-r--r--arch/arm/mach-davinci/include/mach/psc.h148
-rw-r--r--arch/arm/mach-exynos4/Kconfig4
-rw-r--r--arch/arm/mach-exynos4/Makefile1
-rw-r--r--arch/arm/mach-exynos4/clock.c177
-rw-r--r--arch/arm/mach-exynos4/cpufreq.c569
-rw-r--r--arch/arm/mach-exynos4/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-exynos4/mach-smdkc210.c16
-rw-r--r--arch/arm/mach-exynos4/mach-smdkv310.c16
-rw-r--r--arch/arm/mach-exynos4/platsmp.c8
-rw-r--r--arch/arm/mach-exynos4/pm.c2
-rw-r--r--arch/arm/mach-exynos4/sleep.S22
-rw-r--r--arch/arm/mach-h720x/h7201-eval.c1
-rw-r--r--arch/arm/mach-h720x/h7202-eval.c1
-rw-r--r--arch/arm/mach-h720x/include/mach/entry-macro.S3
-rw-r--r--arch/arm/mach-h720x/include/mach/memory.h7
-rw-r--r--arch/arm/mach-imx/Kconfig5
-rw-r--r--arch/arm/mach-imx/dma-v1.c25
-rw-r--r--arch/arm/mach-imx/eukrea_mbimx27-baseboard.c23
-rw-r--r--arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c15
-rw-r--r--arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c13
-rw-r--r--arch/arm/mach-imx/mach-apf9328.c7
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c2
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c44
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c10
-rw-r--r--arch/arm/mach-imx/mach-mx31moboard.c14
-rw-r--r--arch/arm/mach-imx/mach-mx35_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-scb9328.c17
-rw-r--r--arch/arm/mach-imx/mx31lite-db.c15
-rw-r--r--arch/arm/mach-integrator/include/mach/bits.h61
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c6
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c12
-rw-r--r--arch/arm/mach-ixp4xx/coyote-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/gateway7001-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/goramo_mlr.c3
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/memory.h4
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c12
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/vulcan-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/wg302v2-setup.c3
-rw-r--r--arch/arm/mach-loki/Kconfig13
-rw-r--r--arch/arm/mach-loki/Makefile3
-rw-r--r--arch/arm/mach-loki/Makefile.boot3
-rw-r--r--arch/arm/mach-loki/addr-map.c122
-rw-r--r--arch/arm/mach-loki/common.c162
-rw-r--r--arch/arm/mach-loki/common.h37
-rw-r--r--arch/arm/mach-loki/include/mach/bridge-regs.h28
-rw-r--r--arch/arm/mach-loki/include/mach/debug-macro.S19
-rw-r--r--arch/arm/mach-loki/include/mach/entry-macro.S30
-rw-r--r--arch/arm/mach-loki/include/mach/hardware.h15
-rw-r--r--arch/arm/mach-loki/include/mach/io.h26
-rw-r--r--arch/arm/mach-loki/include/mach/irqs.h58
-rw-r--r--arch/arm/mach-loki/include/mach/loki.h83
-rw-r--r--arch/arm/mach-loki/include/mach/memory.h10
-rw-r--r--arch/arm/mach-loki/include/mach/system.h36
-rw-r--r--arch/arm/mach-loki/include/mach/timex.h11
-rw-r--r--arch/arm/mach-loki/include/mach/uncompress.h47
-rw-r--r--arch/arm/mach-loki/include/mach/vmalloc.h5
-rw-r--r--arch/arm/mach-loki/irq.c22
-rw-r--r--arch/arm/mach-loki/lb88rc8480-setup.c99
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/entry-macro.S4
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-msm/platsmp.c19
-rw-r--r--arch/arm/mach-mx5/Kconfig4
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51.c12
-rw-r--r--arch/arm/mach-mx5/board-mx51_3ds.c3
-rw-r--r--arch/arm/mach-mx5/board-mx51_babbage.c34
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikamx.c15
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikasb.c16
-rw-r--r--arch/arm/mach-mx5/clock-mx51-mx53.c4
-rw-r--r--arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c24
-rw-r--r--arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c19
-rw-r--r--arch/arm/mach-mxs/Kconfig1
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxsfb.c1
-rw-r--r--arch/arm/mach-mxs/include/mach/dma.h2
-rw-r--r--arch/arm/mach-mxs/mach-tx28.c13
-rw-r--r--arch/arm/mach-nuc93x/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c4
-rw-r--r--arch/arm/mach-omap1/board-fsample.c4
-rw-r--r--arch/arm/mach-omap1/board-generic.c4
-rw-r--r--arch/arm/mach-omap1/board-h2.c4
-rw-r--r--arch/arm/mach-omap1/board-h3.c4
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c4
-rw-r--r--arch/arm/mach-omap1/board-innovator.c4
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c4
-rw-r--r--arch/arm/mach-omap1/board-osk.c4
-rw-r--r--arch/arm/mach-omap1/board-palmte.c4
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c4
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c4
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c4
-rw-r--r--arch/arm/mach-omap1/board-sx1.c4
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c4
-rw-r--r--arch/arm/mach-omap1/irq.c2
-rw-r--r--arch/arm/mach-omap1/mcbsp.c4
-rw-r--r--arch/arm/mach-omap1/time.c6
-rw-r--r--arch/arm/mach-omap1/timer32k.c4
-rw-r--r--arch/arm/mach-omap2/Makefile20
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c4
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c93
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c4
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c156
-rw-r--r--arch/arm/mach-omap2/board-am3517crane.c4
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c4
-rw-r--r--arch/arm/mach-omap2/board-apollon.c4
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c81
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c5
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c64
-rw-r--r--arch/arm/mach-omap2/board-flash.c4
-rw-r--r--arch/arm/mach-omap2/board-generic.c4
-rw-r--r--arch/arm/mach-omap2/board-h4.c4
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c79
-rw-r--r--arch/arm/mach-omap2/board-ldp.c29
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c12
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c89
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c111
-rw-r--r--arch/arm/mach-omap2/board-omap3logic.c19
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c119
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c99
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c97
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c161
-rw-r--r--arch/arm/mach-omap2/board-overo.c83
-rw-r--r--arch/arm/mach-omap2/board-rm680.c12
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c67
-rw-r--r--arch/arm/mach-omap2/board-rx51.c4
-rw-r--r--arch/arm/mach-omap2/board-ti8168evm.c9
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c128
-rw-r--r--arch/arm/mach-omap2/board-zoom.c8
-rw-r--r--arch/arm/mach-omap2/clock44xx.h7
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c237
-rw-r--r--arch/arm/mach-omap2/clockdomains44xx_data.c124
-rw-r--r--arch/arm/mach-omap2/cm-regbits-44xx.h623
-rw-r--r--arch/arm/mach-omap2/cm1_44xx.h64
-rw-r--r--arch/arm/mach-omap2/cm2_44xx.h73
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c27
-rw-r--r--arch/arm/mach-omap2/common-board-devices.h26
-rw-r--r--arch/arm/mach-omap2/control.c7
-rw-r--r--arch/arm/mach-omap2/control.h6
-rw-r--r--arch/arm/mach-omap2/gpmc-nand.c10
-rw-r--r--arch/arm/mach-omap2/include/mach/entry-macro.S3
-rw-r--r--arch/arm/mach-omap2/io.c17
-rw-r--r--arch/arm/mach-omap2/irq.c32
-rw-r--r--arch/arm/mach-omap2/omap-smp.c8
-rw-r--r--arch/arm/mach-omap2/omap4-common.c10
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c223
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c832
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c910
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c173
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c322
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c130
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c150
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c654
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c472
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.c20
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.h93
-rw-r--r--arch/arm/mach-omap2/pm-debug.c372
-rw-r--r--arch/arm/mach-omap2/pm.h38
-rw-r--r--arch/arm/mach-omap2/pm24xx.c6
-rw-r--r--arch/arm/mach-omap2/pm34xx.c86
-rw-r--r--arch/arm/mach-omap2/powerdomains44xx_data.c18
-rw-r--r--arch/arm/mach-omap2/prcm_mpu44xx.h69
-rw-r--r--arch/arm/mach-omap2/prm44xx.h34
-rw-r--r--arch/arm/mach-omap2/sleep34xx.S518
-rw-r--r--arch/arm/mach-omap2/smartreflex.c38
-rw-r--r--arch/arm/mach-omap2/timer-gp.c266
-rw-r--r--arch/arm/mach-omap2/timer-gp.h16
-rw-r--r--arch/arm/mach-omap2/timer.c342
-rw-r--r--arch/arm/mach-omap2/twl-common.c304
-rw-r--r--arch/arm/mach-omap2/twl-common.h59
-rw-r--r--arch/arm/mach-orion5x/mpp.c2
-rw-r--r--arch/arm/mach-pnx4008/include/mach/entry-macro.S5
-rw-r--r--arch/arm/mach-pxa/cm-x2xx.c3
-rw-r--r--arch/arm/mach-pxa/cm-x300.c58
-rw-r--r--arch/arm/mach-pxa/hx4700.c70
-rw-r--r--arch/arm/mach-pxa/include/mach/magician.h3
-rw-r--r--arch/arm/mach-pxa/include/mach/memory.h4
-rw-r--r--arch/arm/mach-pxa/include/mach/pm.h4
-rw-r--r--arch/arm/mach-pxa/magician.c57
-rw-r--r--arch/arm/mach-pxa/mioa701.c70
-rw-r--r--arch/arm/mach-pxa/palmz72.c1
-rw-r--r--arch/arm/mach-pxa/pm.c1
-rw-r--r--arch/arm/mach-pxa/pxa25x.c3
-rw-r--r--arch/arm/mach-pxa/pxa27x.c11
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c14
-rw-r--r--arch/arm/mach-pxa/saarb.c2
-rw-r--r--arch/arm/mach-pxa/sleep.S55
-rw-r--r--arch/arm/mach-pxa/zeus.c3
-rw-r--r--arch/arm/mach-realview/Kconfig1
-rw-r--r--arch/arm/mach-realview/include/mach/memory.h4
-rw-r--r--arch/arm/mach-realview/platsmp.c8
-rw-r--r--arch/arm/mach-realview/realview_eb.c3
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c3
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c3
-rw-r--r--arch/arm/mach-realview/realview_pba8.c3
-rw-r--r--arch/arm/mach-realview/realview_pbx.c3
-rw-r--r--arch/arm/mach-s3c2400/Kconfig7
-rw-r--r--arch/arm/mach-s3c2400/Makefile15
-rw-r--r--arch/arm/mach-s3c2400/gpio.c42
-rw-r--r--arch/arm/mach-s3c2400/include/mach/map.h66
-rw-r--r--arch/arm/mach-s3c2410/include/mach/gpio-fns.h6
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-gpio.h241
-rw-r--r--arch/arm/mach-s3c2410/include/mach/regs-mem.h28
-rw-r--r--arch/arm/mach-s3c2412/Kconfig2
-rw-r--r--arch/arm/mach-s3c2412/clock.c36
-rw-r--r--arch/arm/mach-s3c2412/pm.c6
-rw-r--r--arch/arm/mach-s3c2416/clock.c10
-rw-r--r--arch/arm/mach-s3c2416/pm.c6
-rw-r--r--arch/arm/mach-s3c2440/clock.c3
-rw-r--r--arch/arm/mach-s3c2443/clock.c16
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/debug-macro.S27
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/io.h18
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/irqs.h117
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/map.h86
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/memory.h21
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/regs-clock.h88
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/regs-irq.h25
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/system.h25
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/tick.h15
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/timex.h18
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/vmalloc.h17
-rw-r--r--arch/arm/mach-s3c64xx/Kconfig1
-rw-r--r--arch/arm/mach-s3c64xx/Makefile4
-rw-r--r--arch/arm/mach-s3c64xx/clock.c86
-rw-r--r--arch/arm/mach-s3c64xx/cpufreq.c270
-rw-r--r--arch/arm/mach-s3c64xx/dev-onenand1.c10
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/regs-fb.h21
-rw-r--r--arch/arm/mach-s3c64xx/mach-anw6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-ncp.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq5.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq7.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c43
-rw-r--r--arch/arm/mach-s3c64xx/pm.c2
-rw-r--r--arch/arm/mach-s3c64xx/setup-fb-24bpp.c1
-rw-r--r--arch/arm/mach-s3c64xx/sleep.S23
-rw-r--r--arch/arm/mach-s5p64x0/Kconfig2
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6440.c74
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6450.c68
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-s5p64x0/mach-smdk6440.c54
-rw-r--r--arch/arm/mach-s5p64x0/mach-smdk6450.c54
-rw-r--r--arch/arm/mach-s5pc100/Kconfig1
-rw-r--r--arch/arm/mach-s5pc100/clock.c200
-rw-r--r--arch/arm/mach-s5pc100/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-s5pc100/include/mach/regs-fb.h105
-rw-r--r--arch/arm/mach-s5pc100/mach-smdkc100.c57
-rw-r--r--arch/arm/mach-s5pc100/setup-fb-24bpp.c1
-rw-r--r--arch/arm/mach-s5pv210/Kconfig1
-rw-r--r--arch/arm/mach-s5pv210/Makefile1
-rw-r--r--arch/arm/mach-s5pv210/clock.c167
-rw-r--r--arch/arm/mach-s5pv210/cpufreq.c485
-rw-r--r--arch/arm/mach-s5pv210/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-s5pv210/include/mach/regs-fb.h21
-rw-r--r--arch/arm/mach-s5pv210/mach-aquila.c2
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c2
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkv210.c56
-rw-r--r--arch/arm/mach-s5pv210/pm.c2
-rw-r--r--arch/arm/mach-s5pv210/setup-fb-24bpp.c1
-rw-r--r--arch/arm/mach-s5pv210/sleep.S21
-rw-r--r--arch/arm/mach-sa1100/assabet.c3
-rw-r--r--arch/arm/mach-sa1100/badge4.c3
-rw-r--r--arch/arm/mach-sa1100/include/mach/memory.h4
-rw-r--r--arch/arm/mach-sa1100/jornada720.c3
-rw-r--r--arch/arm/mach-sa1100/pm.c7
-rw-r--r--arch/arm/mach-sa1100/sleep.S19
-rw-r--r--arch/arm/mach-shark/core.c1
-rw-r--r--arch/arm/mach-shark/include/mach/entry-macro.S10
-rw-r--r--arch/arm/mach-shark/include/mach/memory.h2
-rw-r--r--arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h21
-rw-r--r--arch/arm/mach-shmobile/include/mach/sdhi.h16
-rw-r--r--arch/arm/mach-shmobile/platsmp.c5
-rw-r--r--arch/arm/mach-tegra/board-seaboard.c2
-rw-r--r--arch/arm/mach-tegra/board-trimslice-pinmux.c2
-rw-r--r--arch/arm/mach-tegra/platsmp.c8
-rw-r--r--arch/arm/mach-ux500/platsmp.c8
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c4
-rw-r--r--arch/arm/mm/abort-ev4.S17
-rw-r--r--arch/arm/mm/abort-ev4t.S17
-rw-r--r--arch/arm/mm/abort-ev5t.S19
-rw-r--r--arch/arm/mm/abort-ev5tj.S25
-rw-r--r--arch/arm/mm/abort-ev6.S25
-rw-r--r--arch/arm/mm/abort-ev7.S25
-rw-r--r--arch/arm/mm/abort-lv4t.S141
-rw-r--r--arch/arm/mm/abort-macro.S34
-rw-r--r--arch/arm/mm/abort-nommu.S10
-rw-r--r--arch/arm/mm/alignment.c3
-rw-r--r--arch/arm/mm/cache-fa.S15
-rw-r--r--arch/arm/mm/cache-v3.S15
-rw-r--r--arch/arm/mm/cache-v4.S15
-rw-r--r--arch/arm/mm/cache-v4wb.S15
-rw-r--r--arch/arm/mm/cache-v4wt.S15
-rw-r--r--arch/arm/mm/cache-v6.S15
-rw-r--r--arch/arm/mm/cache-v7.S15
-rw-r--r--arch/arm/mm/copypage-v6.c1
-rw-r--r--arch/arm/mm/dma-mapping.c35
-rw-r--r--arch/arm/mm/fault.c6
-rw-r--r--arch/arm/mm/init.c47
-rw-r--r--arch/arm/mm/mm.h6
-rw-r--r--arch/arm/mm/pabort-legacy.S10
-rw-r--r--arch/arm/mm/pabort-v6.S10
-rw-r--r--arch/arm/mm/pabort-v7.S11
-rw-r--r--arch/arm/mm/proc-arm1020.S45
-rw-r--r--arch/arm/mm/proc-arm1020e.S52
-rw-r--r--arch/arm/mm/proc-arm1022.S52
-rw-r--r--arch/arm/mm/proc-arm1026.S53
-rw-r--r--arch/arm/mm/proc-arm6_7.S256
-rw-r--r--arch/arm/mm/proc-arm720.S85
-rw-r--r--arch/arm/mm/proc-arm740.S42
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S216
-rw-r--r--arch/arm/mm/proc-arm920.S56
-rw-r--r--arch/arm/mm/proc-arm922.S53
-rw-r--r--arch/arm/mm/proc-arm925.S88
-rw-r--r--arch/arm/mm/proc-arm926.S54
-rw-r--r--arch/arm/mm/proc-arm940.S51
-rw-r--r--arch/arm/mm/proc-arm946.S53
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S78
-rw-r--r--arch/arm/mm/proc-fa526.S38
-rw-r--r--arch/arm/mm/proc-feroceon.S202
-rw-r--r--arch/arm/mm/proc-macros.S68
-rw-r--r--arch/arm/mm/proc-mohawk.S61
-rw-r--r--arch/arm/mm/proc-sa110.S39
-rw-r--r--arch/arm/mm/proc-sa1100.S91
-rw-r--r--arch/arm/mm/proc-v6.S42
-rw-r--r--arch/arm/mm/proc-v7.S139
-rw-r--r--arch/arm/mm/proc-xsc3.S93
-rw-r--r--arch/arm/mm/proc-xscale.S510
-rw-r--r--arch/arm/mm/tlb-fa.S12
-rw-r--r--arch/arm/mm/tlb-v3.S8
-rw-r--r--arch/arm/mm/tlb-v4.S8
-rw-r--r--arch/arm/mm/tlb-v4wb.S8
-rw-r--r--arch/arm/mm/tlb-v4wbi.S8
-rw-r--r--arch/arm/mm/tlb-v6.S12
-rw-r--r--arch/arm/mm/tlb-v7.S15
-rw-r--r--arch/arm/plat-mxc/avic.c12
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-dma.c14
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-ssi.c2
-rw-r--r--arch/arm/plat-mxc/include/mach/debug-macro.S10
-rw-r--r--arch/arm/plat-mxc/include/mach/entry-macro.S4
-rw-r--r--arch/arm/plat-mxc/include/mach/hardware.h28
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx25.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx53.h128
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-v1.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-v3.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux.h26
-rw-r--r--arch/arm/plat-mxc/include/mach/mx53.h26
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc.h8
-rw-r--r--arch/arm/plat-mxc/include/mach/timex.h13
-rw-r--r--arch/arm/plat-mxc/iomux-v1.c34
-rw-r--r--arch/arm/plat-mxc/pwm.c8
-rw-r--r--arch/arm/plat-mxc/tzic.c4
-rw-r--r--arch/arm/plat-omap/Kconfig3
-rw-r--r--arch/arm/plat-omap/counter_32k.c123
-rw-r--r--arch/arm/plat-omap/dmtimer.c209
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h2
-rw-r--r--arch/arm/plat-omap/include/plat/common.h6
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h251
-rw-r--r--arch/arm/plat-omap/include/plat/irqs.h6
-rw-r--r--arch/arm/plat-omap/include/plat/mcbsp.h74
-rw-r--r--arch/arm/plat-omap/include/plat/nand.h6
-rw-r--r--arch/arm/plat-omap/include/plat/omap-pm.h8
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hwmod.h20
-rw-r--r--arch/arm/plat-omap/mcbsp.c599
-rw-r--r--arch/arm/plat-omap/omap_device.c15
-rw-r--r--arch/arm/plat-omap/sram.c15
-rw-r--r--arch/arm/plat-s3c24xx/Kconfig2
-rw-r--r--arch/arm/plat-s3c24xx/clock-dclk.c4
-rw-r--r--arch/arm/plat-s3c24xx/cpu.c15
-rw-r--r--arch/arm/plat-s3c24xx/devs.c38
-rw-r--r--arch/arm/plat-s3c24xx/include/mach/clkdev.h7
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/regs-iis.h9
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/regs-spi.h1
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/s3c2400.h31
-rw-r--r--arch/arm/plat-s3c24xx/s3c2410-clock.c21
-rw-r--r--arch/arm/plat-s3c24xx/s3c2443-clock.c39
-rw-r--r--arch/arm/plat-s3c24xx/sleep.S25
-rw-r--r--arch/arm/plat-s5p/clock.c35
-rw-r--r--arch/arm/plat-s5p/include/plat/s5p-clock.h5
-rw-r--r--arch/arm/plat-s5p/s5p-time.c29
-rw-r--r--arch/arm/plat-samsung/Kconfig6
-rw-r--r--arch/arm/plat-samsung/Makefile1
-rw-r--r--arch/arm/plat-samsung/clock.c98
-rw-r--r--arch/arm/plat-samsung/dev-backlight.c149
-rw-r--r--arch/arm/plat-samsung/dev-fb.c14
-rw-r--r--arch/arm/plat-samsung/dev-hwmon.c14
-rw-r--r--arch/arm/plat-samsung/dev-i2c0.c14
-rw-r--r--arch/arm/plat-samsung/dev-i2c1.c24
-rw-r--r--arch/arm/plat-samsung/dev-i2c2.c24
-rw-r--r--arch/arm/plat-samsung/dev-i2c3.c24
-rw-r--r--arch/arm/plat-samsung/dev-i2c4.c24
-rw-r--r--arch/arm/plat-samsung/dev-i2c5.c24
-rw-r--r--arch/arm/plat-samsung/dev-i2c6.c24
-rw-r--r--arch/arm/plat-samsung/dev-i2c7.c24
-rw-r--r--arch/arm/plat-samsung/dev-nand.c9
-rw-r--r--arch/arm/plat-samsung/dev-ts.c14
-rw-r--r--arch/arm/plat-samsung/dev-usb.c9
-rw-r--r--arch/arm/plat-samsung/include/plat/backlight.h26
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/iic.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/pm.h5
-rw-r--r--arch/arm/plat-samsung/include/plat/regs-serial.h8
-rw-r--r--arch/arm/plat-samsung/pm.c11
-rw-r--r--arch/arm/plat-samsung/pwm-clock.c10
-rw-r--r--arch/arm/plat-samsung/time.c2
-rw-r--r--arch/arm/vfp/vfphw.S63
-rw-r--r--arch/arm/vfp/vfpmodule.c133
-rw-r--r--arch/avr32/include/asm/delay.h27
-rw-r--r--arch/avr32/kernel/module.c20
-rw-r--r--arch/blackfin/Kconfig10
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig8
-rw-r--r--arch/blackfin/include/asm/Kbuild43
-rw-r--r--arch/blackfin/include/asm/atomic.h13
-rw-r--r--arch/blackfin/include/asm/auxvec.h1
-rw-r--r--arch/blackfin/include/asm/bitsperlong.h1
-rw-r--r--arch/blackfin/include/asm/blackfin.h6
-rw-r--r--arch/blackfin/include/asm/bugs.h1
-rw-r--r--arch/blackfin/include/asm/cputime.h1
-rw-r--r--arch/blackfin/include/asm/current.h1
-rw-r--r--arch/blackfin/include/asm/device.h1
-rw-r--r--arch/blackfin/include/asm/div64.h1
-rw-r--r--arch/blackfin/include/asm/dpmc.h27
-rw-r--r--arch/blackfin/include/asm/emergency-restart.h1
-rw-r--r--arch/blackfin/include/asm/errno.h1
-rw-r--r--arch/blackfin/include/asm/fb.h1
-rw-r--r--arch/blackfin/include/asm/futex.h1
-rw-r--r--arch/blackfin/include/asm/gpio.h64
-rw-r--r--arch/blackfin/include/asm/gptimers.h19
-rw-r--r--arch/blackfin/include/asm/hw_irq.h1
-rw-r--r--arch/blackfin/include/asm/ioctl.h1
-rw-r--r--arch/blackfin/include/asm/ipcbuf.h1
-rw-r--r--arch/blackfin/include/asm/irq_regs.h1
-rw-r--r--arch/blackfin/include/asm/irqflags.h42
-rw-r--r--arch/blackfin/include/asm/kdebug.h1
-rw-r--r--arch/blackfin/include/asm/kmap_types.h1
-rw-r--r--arch/blackfin/include/asm/local.h1
-rw-r--r--arch/blackfin/include/asm/local64.h1
-rw-r--r--arch/blackfin/include/asm/mman.h1
-rw-r--r--arch/blackfin/include/asm/module.h8
-rw-r--r--arch/blackfin/include/asm/msgbuf.h1
-rw-r--r--arch/blackfin/include/asm/mutex.h77
-rw-r--r--arch/blackfin/include/asm/page.h8
-rw-r--r--arch/blackfin/include/asm/param.h1
-rw-r--r--arch/blackfin/include/asm/pda.h10
-rw-r--r--arch/blackfin/include/asm/percpu.h1
-rw-r--r--arch/blackfin/include/asm/pgalloc.h1
-rw-r--r--arch/blackfin/include/asm/resource.h1
-rw-r--r--arch/blackfin/include/asm/scatterlist.h6
-rw-r--r--arch/blackfin/include/asm/sections.h8
-rw-r--r--arch/blackfin/include/asm/sembuf.h1
-rw-r--r--arch/blackfin/include/asm/serial.h1
-rw-r--r--arch/blackfin/include/asm/setup.h1
-rw-r--r--arch/blackfin/include/asm/shmbuf.h1
-rw-r--r--arch/blackfin/include/asm/shmparam.h1
-rw-r--r--arch/blackfin/include/asm/sigcontext.h8
-rw-r--r--arch/blackfin/include/asm/socket.h1
-rw-r--r--arch/blackfin/include/asm/sockios.h1
-rw-r--r--arch/blackfin/include/asm/spinlock.h8
-rw-r--r--arch/blackfin/include/asm/statfs.h1
-rw-r--r--arch/blackfin/include/asm/termbits.h1
-rw-r--r--arch/blackfin/include/asm/termios.h1
-rw-r--r--arch/blackfin/include/asm/topology.h1
-rw-r--r--arch/blackfin/include/asm/types.h1
-rw-r--r--arch/blackfin/include/asm/ucontext.h1
-rw-r--r--arch/blackfin/include/asm/unaligned.h1
-rw-r--r--arch/blackfin/include/asm/user.h1
-rw-r--r--arch/blackfin/include/asm/xor.h1
-rw-r--r--arch/blackfin/kernel/Makefile1
-rw-r--r--arch/blackfin/kernel/asm-offsets.c10
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c26
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c109
-rw-r--r--arch/blackfin/kernel/gptimers.c93
-rw-r--r--arch/blackfin/kernel/module.c21
-rw-r--r--arch/blackfin/kernel/process.c1
-rw-r--r--arch/blackfin/kernel/pwm.c100
-rw-r--r--arch/blackfin/kernel/reboot.c4
-rw-r--r--arch/blackfin/kernel/setup.c16
-rw-r--r--arch/blackfin/kernel/time.c4
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S1
-rw-r--r--arch/blackfin/mach-bf518/Kconfig78
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c59
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c47
-rw-r--r--arch/blackfin/mach-bf518/include/mach/anomaly.h24
-rw-r--r--arch/blackfin/mach-bf518/include/mach/portmux.h54
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c19
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c55
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c62
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c98
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c70
-rw-r--r--arch/blackfin/mach-bf527/include/mach/anomaly.h34
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c28
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c10
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c29
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c36
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c1
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c78
-rw-r--r--arch/blackfin/mach-bf533/include/mach/anomaly.h19
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c51
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c63
-rw-r--r--arch/blackfin/mach-bf537/boards/dnp5370.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c38
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c176
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c51
-rw-r--r--arch/blackfin/mach-bf537/include/mach/anomaly.h34
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c25
-rw-r--r--arch/blackfin/mach-bf538/ext-gpio.c37
-rw-r--r--arch/blackfin/mach-bf538/include/mach/anomaly.h38
-rw-r--r--arch/blackfin/mach-bf538/include/mach/gpio.h3
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c15
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c32
-rw-r--r--arch/blackfin/mach-bf548/include/mach/anomaly.h220
-rw-r--r--arch/blackfin/mach-bf548/include/mach/gpio.h2
-rw-r--r--arch/blackfin/mach-bf548/include/mach/irq.h2
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c9
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c58
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c41
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h132
-rw-r--r--arch/blackfin/mach-bf561/include/mach/gpio.h6
-rw-r--r--arch/blackfin/mach-bf561/secondary.S152
-rw-r--r--arch/blackfin/mach-common/dpmc_modes.S1016
-rw-r--r--arch/blackfin/mach-common/head.S36
-rw-r--r--arch/blackfin/mach-common/ints-priority.c41
-rw-r--r--arch/blackfin/mach-common/smp.c17
-rw-r--r--arch/cris/kernel/module.c43
-rw-r--r--arch/frv/kernel/module.c57
-rw-r--r--arch/h8300/kernel/module.c45
-rw-r--r--arch/ia64/include/asm/paravirt.h4
-rw-r--r--arch/ia64/kernel/module.c16
-rw-r--r--arch/ia64/kernel/paravirt.c2
-rw-r--r--arch/ia64/kvm/Kconfig1
-rw-r--r--arch/m32r/include/asm/delay.h27
-rw-r--r--arch/m32r/kernel/module.c38
-rw-r--r--arch/m68k/kernel/module_mm.c27
-rw-r--r--arch/m68k/kernel/module_no.c34
-rw-r--r--arch/microblaze/kernel/module.c35
-rw-r--r--arch/mips/Kconfig16
-rw-r--r--arch/mips/kernel/module.c20
-rw-r--r--arch/mn10300/kernel/module.c61
-rw-r--r--arch/openrisc/Kconfig207
-rw-r--r--arch/openrisc/Makefile55
-rw-r--r--arch/openrisc/README.openrisc99
-rw-r--r--arch/openrisc/TODO.openrisc16
-rw-r--r--arch/openrisc/boot/Makefile15
-rw-r--r--arch/openrisc/boot/dts/or1ksim.dts50
-rw-r--r--arch/openrisc/configs/or1ksim_defconfig65
-rw-r--r--arch/openrisc/include/asm/Kbuild64
-rw-r--r--arch/openrisc/include/asm/asm-offsets.h1
-rw-r--r--arch/openrisc/include/asm/bitops.h59
-rw-r--r--arch/openrisc/include/asm/bitops/__ffs.h33
-rw-r--r--arch/openrisc/include/asm/bitops/__fls.h33
-rw-r--r--arch/openrisc/include/asm/bitops/ffs.h32
-rw-r--r--arch/openrisc/include/asm/bitops/fls.h33
-rw-r--r--arch/openrisc/include/asm/byteorder.h1
-rw-r--r--arch/openrisc/include/asm/cache.h29
-rw-r--r--arch/openrisc/include/asm/cpuinfo.h34
-rw-r--r--arch/openrisc/include/asm/delay.h24
-rw-r--r--arch/openrisc/include/asm/dma-mapping.h134
-rw-r--r--arch/openrisc/include/asm/elf.h108
-rw-r--r--arch/openrisc/include/asm/fixmap.h87
-rw-r--r--arch/openrisc/include/asm/gpio.h65
-rw-r--r--arch/openrisc/include/asm/io.h51
-rw-r--r--arch/openrisc/include/asm/irq.h27
-rw-r--r--arch/openrisc/include/asm/irqflags.h29
-rw-r--r--arch/openrisc/include/asm/linkage.h25
-rw-r--r--arch/openrisc/include/asm/memblock.h24
-rw-r--r--arch/openrisc/include/asm/mmu.h26
-rw-r--r--arch/openrisc/include/asm/mmu_context.h43
-rw-r--r--arch/openrisc/include/asm/mutex.h27
-rw-r--r--arch/openrisc/include/asm/page.h110
-rw-r--r--arch/openrisc/include/asm/param.h26
-rw-r--r--arch/openrisc/include/asm/pgalloc.h102
-rw-r--r--arch/openrisc/include/asm/pgtable.h463
-rw-r--r--arch/openrisc/include/asm/processor.h113
-rw-r--r--arch/openrisc/include/asm/prom.h77
-rw-r--r--arch/openrisc/include/asm/ptrace.h131
-rw-r--r--arch/openrisc/include/asm/serial.h36
-rw-r--r--arch/openrisc/include/asm/sigcontext.h38
-rw-r--r--arch/openrisc/include/asm/spinlock.h24
-rw-r--r--arch/openrisc/include/asm/spr.h42
-rw-r--r--arch/openrisc/include/asm/spr_defs.h604
-rw-r--r--arch/openrisc/include/asm/syscall.h77
-rw-r--r--arch/openrisc/include/asm/syscalls.h27
-rw-r--r--arch/openrisc/include/asm/system.h35
-rw-r--r--arch/openrisc/include/asm/thread_info.h134
-rw-r--r--arch/openrisc/include/asm/timex.h36
-rw-r--r--arch/openrisc/include/asm/tlb.h34
-rw-r--r--arch/openrisc/include/asm/tlbflush.h55
-rw-r--r--arch/openrisc/include/asm/uaccess.h355
-rw-r--r--arch/openrisc/include/asm/unaligned.h51
-rw-r--r--arch/openrisc/include/asm/unistd.h31
-rw-r--r--arch/openrisc/kernel/Makefile14
-rw-r--r--arch/openrisc/kernel/asm-offsets.c70
-rw-r--r--arch/openrisc/kernel/dma.c191
-rw-r--r--arch/openrisc/kernel/entry.S1128
-rw-r--r--arch/openrisc/kernel/head.S1607
-rw-r--r--arch/openrisc/kernel/idle.c77
-rw-r--r--arch/openrisc/kernel/init_task.c41
-rw-r--r--arch/openrisc/kernel/irq.c172
-rw-r--r--arch/openrisc/kernel/module.c72
-rw-r--r--arch/openrisc/kernel/or32_ksyms.c46
-rw-r--r--arch/openrisc/kernel/process.c311
-rw-r--r--arch/openrisc/kernel/prom.c108
-rw-r--r--arch/openrisc/kernel/ptrace.c211
-rw-r--r--arch/openrisc/kernel/setup.c381
-rw-r--r--arch/openrisc/kernel/signal.c396
-rw-r--r--arch/openrisc/kernel/sys_call_table.c28
-rw-r--r--arch/openrisc/kernel/sys_or32.c57
-rw-r--r--arch/openrisc/kernel/time.c181
-rw-r--r--arch/openrisc/kernel/traps.c366
-rw-r--r--arch/openrisc/kernel/vmlinux.h12
-rw-r--r--arch/openrisc/kernel/vmlinux.lds.S115
-rw-r--r--arch/openrisc/lib/Makefile5
-rw-r--r--arch/openrisc/lib/delay.c60
-rw-r--r--arch/openrisc/lib/string.S204
-rw-r--r--arch/openrisc/mm/Makefile5
-rw-r--r--arch/openrisc/mm/fault.c338
-rw-r--r--arch/openrisc/mm/init.c283
-rw-r--r--arch/openrisc/mm/ioremap.c137
-rw-r--r--arch/openrisc/mm/tlb.c193
-rw-r--r--arch/parisc/kernel/module.c12
-rw-r--r--arch/powerpc/include/asm/cputable.h14
-rw-r--r--arch/powerpc/include/asm/exception-64s.h136
-rw-r--r--arch/powerpc/include/asm/hvcall.h5
-rw-r--r--arch/powerpc/include/asm/kvm.h15
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h196
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h41
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h4
-rw-r--r--arch/powerpc/include/asm/kvm_e500.h30
-rw-r--r--arch/powerpc/include/asm/kvm_host.h169
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h41
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h10
-rw-r--r--arch/powerpc/include/asm/paca.h3
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h28
-rw-r--r--arch/powerpc/include/asm/reg.h25
-rw-r--r--arch/powerpc/include/asm/reg_booke.h1
-rw-r--r--arch/powerpc/kernel/asm-offsets.c190
-rw-r--r--arch/powerpc/kernel/cpu_setup_power7.S22
-rw-r--r--arch/powerpc/kernel/cpu_setup_ppc970.S26
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S228
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S8
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kernel/module.c18
-rw-r--r--arch/powerpc/kernel/module_32.c11
-rw-r--r--arch/powerpc/kernel/module_64.c10
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/process.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c3
-rw-r--r--arch/powerpc/kernel/setup_64.c3
-rw-r--r--arch/powerpc/kernel/smp.c1
-rw-r--r--arch/powerpc/kernel/traps.c5
-rw-r--r--arch/powerpc/kvm/44x_tlb.c4
-rw-r--r--arch/powerpc/kvm/Kconfig35
-rw-r--r--arch/powerpc/kvm/Makefile27
-rw-r--r--arch/powerpc/kvm/book3s.c1007
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c54
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c180
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c73
-rw-r--r--arch/powerpc/kvm/book3s_exports.c9
-rw-r--r--arch/powerpc/kvm/book3s_hv.c1269
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c155
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S166
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c370
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S1345
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S21
-rw-r--r--arch/powerpc/kvm/book3s_mmu_hpte.c71
-rw-r--r--arch/powerpc/kvm/book3s_pr.c1029
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S102
-rw-r--r--arch/powerpc/kvm/book3s_segment.S117
-rw-r--r--arch/powerpc/kvm/booke.c132
-rw-r--r--arch/powerpc/kvm/booke.h23
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S66
-rw-r--r--arch/powerpc/kvm/e500.c7
-rw-r--r--arch/powerpc/kvm/e500_emulate.c4
-rw-r--r--arch/powerpc/kvm/e500_tlb.c800
-rw-r--r--arch/powerpc/kvm/e500_tlb.h13
-rw-r--r--arch/powerpc/kvm/powerpc.c78
-rw-r--r--arch/powerpc/kvm/timing.c9
-rw-r--r--arch/powerpc/kvm/trace.h4
-rw-r--r--arch/powerpc/mm/hash_native_64.c6
-rw-r--r--arch/powerpc/platforms/iseries/exception.S2
-rw-r--r--arch/powerpc/platforms/iseries/exception.h4
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c9
-rw-r--r--arch/s390/boot/compressed/head31.S4
-rw-r--r--arch/s390/boot/compressed/head64.S4
-rw-r--r--arch/s390/crypto/sha256_s390.c66
-rw-r--r--arch/s390/include/asm/irqflags.h16
-rw-r--r--arch/s390/include/asm/kvm_host.h12
-rw-r--r--arch/s390/include/asm/linkage.h5
-rw-r--r--arch/s390/include/asm/lowcore.h2
-rw-r--r--arch/s390/include/asm/mmu.h4
-rw-r--r--arch/s390/include/asm/pgalloc.h7
-rw-r--r--arch/s390/include/asm/pgtable.h42
-rw-r--r--arch/s390/include/asm/processor.h1
-rw-r--r--arch/s390/include/asm/thread_info.h2
-rw-r--r--arch/s390/include/asm/tlbflush.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c2
-rw-r--r--arch/s390/kernel/base.S25
-rw-r--r--arch/s390/kernel/compat_wrapper.S836
-rw-r--r--arch/s390/kernel/entry.S32
-rw-r--r--arch/s390/kernel/entry.h7
-rw-r--r--arch/s390/kernel/entry64.S111
-rw-r--r--arch/s390/kernel/head.S7
-rw-r--r--arch/s390/kernel/head31.S13
-rw-r--r--arch/s390/kernel/head64.S13
-rw-r--r--arch/s390/kernel/irq.c83
-rw-r--r--arch/s390/kernel/mcount.S16
-rw-r--r--arch/s390/kernel/mcount64.S16
-rw-r--r--arch/s390/kernel/module.c20
-rw-r--r--arch/s390/kernel/reipl.S5
-rw-r--r--arch/s390/kernel/reipl64.S5
-rw-r--r--arch/s390/kernel/relocate_kernel.S6
-rw-r--r--arch/s390/kernel/relocate_kernel64.S6
-rw-r--r--arch/s390/kernel/s390_ksyms.c4
-rw-r--r--arch/s390/kernel/sclp.S5
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/s390/kernel/switch_cpu.S8
-rw-r--r--arch/s390/kernel/switch_cpu64.S8
-rw-r--r--arch/s390/kernel/swsusp_asm64.S8
-rw-r--r--arch/s390/kernel/traps.c36
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/s390/kvm/Makefile2
-rw-r--r--arch/s390/kvm/gaccess.h243
-rw-r--r--arch/s390/kvm/intercept.c35
-rw-r--r--arch/s390/kvm/interrupt.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c48
-rw-r--r--arch/s390/kvm/kvm-s390.h28
-rw-r--r--arch/s390/kvm/priv.c49
-rw-r--r--arch/s390/kvm/sie64a.S98
-rw-r--r--arch/s390/kvm/sigp.c6
-rw-r--r--arch/s390/lib/qrnnd.S5
-rw-r--r--arch/s390/mm/fault.c18
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/pgtable.c421
-rw-r--r--arch/s390/mm/vmem.c8
-rw-r--r--arch/score/kernel/module.c29
-rw-r--r--arch/sh/Kconfig16
-rw-r--r--arch/sh/include/asm/delay.h27
-rw-r--r--arch/sh/kernel/module.c35
-rw-r--r--arch/sparc/kernel/module.c28
-rw-r--r--arch/tile/kernel/module.c31
-rw-r--r--arch/tile/kvm/Kconfig1
-rw-r--r--arch/um/sys-i386/Makefile3
-rw-r--r--arch/unicore32/kernel/module.c35
-rw-r--r--arch/x86/Kconfig28
-rw-r--r--arch/x86/Kconfig.cpu3
-rw-r--r--arch/x86/boot/Makefile9
-rw-r--r--arch/x86/boot/tools/build.c33
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c2
-rw-r--r--arch/x86/include/asm/apb_timer.h22
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h48
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h45
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/delay.h25
-rw-r--r--arch/x86/include/asm/kvm_emulate.h52
-rw-r--r--arch/x86/include/asm/kvm_host.h46
-rw-r--r--arch/x86/include/asm/kvm_para.h20
-rw-r--r--arch/x86/include/asm/msr-index.h12
-rw-r--r--arch/x86/include/asm/paravirt.h9
-rw-r--r--arch/x86/include/asm/paravirt_types.h1
-rw-r--r--arch/x86/include/asm/processor-flags.h1
-rw-r--r--arch/x86/include/asm/vmx.h43
-rw-r--r--arch/x86/include/asm/xen/hypercall.h22
-rw-r--r--arch/x86/include/asm/xen/trace_types.h18
-rw-r--r--arch/x86/kernel/apb_timer.c409
-rw-r--r--arch/x86/kernel/apic/apic.c22
-rw-r--r--arch/x86/kernel/cpu/bugs.c4
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c4
-rw-r--r--arch/x86/kernel/kvm.c72
-rw-r--r--arch/x86/kernel/kvmclock.c2
-rw-r--r--arch/x86/kernel/module.c37
-rw-r--r--arch/x86/kernel/paravirt.c9
-rw-r--r--arch/x86/kernel/quirks.c5
-rw-r--r--arch/x86/kernel/relocate_kernel_32.S2
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S2
-rw-r--r--arch/x86/kernel/tsc.c24
-rw-r--r--arch/x86/kvm/Kconfig2
-rw-r--r--arch/x86/kvm/emulate.c1749
-rw-r--r--arch/x86/kvm/mmu.c1226
-rw-r--r--arch/x86/kvm/mmu.h25
-rw-r--r--arch/x86/kvm/mmu_audit.c12
-rw-r--r--arch/x86/kvm/mmutrace.h48
-rw-r--r--arch/x86/kvm/paging_tmpl.h258
-rw-r--r--arch/x86/kvm/svm.c6
-rw-r--r--arch/x86/kvm/trace.h31
-rw-r--r--arch/x86/kvm/vmx.c2784
-rw-r--r--arch/x86/kvm/x86.c374
-rw-r--r--arch/x86/kvm/x86.h44
-rw-r--r--arch/x86/xen/Makefile2
-rw-r--r--arch/x86/xen/enlighten.c16
-rw-r--r--arch/x86/xen/mmu.c139
-rw-r--r--arch/x86/xen/multicalls.c169
-rw-r--r--arch/x86/xen/multicalls.h6
-rw-r--r--arch/x86/xen/trace.c61
-rw-r--r--arch/xtensa/kernel/module.c43
886 files changed, 37816 insertions, 25532 deletions
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index ebc3c894b5a2..2fd00b7077e4 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -29,20 +29,6 @@
#define DEBUGP(fmt...)
#endif
-void *
-module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-void
-module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
/* Allocate the GOT at the end of the core sections. */
struct got_entry {
@@ -156,14 +142,6 @@ module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
}
int
-apply_relocate(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- unsigned int relsec, struct module *me)
-{
- printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
- return -ENOEXEC;
-}
-
-int
apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
@@ -302,15 +280,3 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
return 0;
}
-
-int
-module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void
-module_arch_cleanup(struct module *mod)
-{
-}
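The hunks above drop alpha's private module_alloc(), module_free(), apply_relocate(), module_finalize() and module_arch_cleanup() stubs; they duplicate behaviour the generic module loader is assumed to provide as weak defaults, and the same pattern repeats for the other architectures touched below. A minimal sketch of such defaults, for orientation only; the authoritative versions live in kernel/module.c and may differ in detail:

#include <linux/moduleloader.h>
#include <linux/vmalloc.h>

/* Sketch only: weak fallbacks an architecture is assumed to inherit once it
 * stops overriding these hooks; they mirror what the removed alpha stubs did. */
void * __weak module_alloc(unsigned long size)
{
	if (size == 0)
		return NULL;
	return vmalloc(size);
}

void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}

int __weak module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;	/* nothing arch-specific to do */
}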
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e04fa9d7637c..9cb1f4bd7618 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -10,7 +10,7 @@ config ARM
select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
- select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
+ select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
@@ -37,6 +37,9 @@ config ARM
Europe. There is an ARM Linux project with a web page at
<http://www.arm.linux.org.uk/>.
+config ARM_HAS_SG_CHAIN
+ bool
+
config HAVE_PWM
bool
@@ -490,14 +493,6 @@ config ARCH_KIRKWOOD
Support for the following Marvell Kirkwood series SoCs:
88F6180, 88F6192 and 88F6281.
-config ARCH_LOKI
- bool "Marvell Loki (88RC8480)"
- select CPU_FEROCEON
- select GENERIC_CLOCKEVENTS
- select PLAT_ORION
- help
- Support for the Marvell Loki (88RC8480) SoC.
-
config ARCH_LPC32XX
bool "NXP LPC32XX"
select CLKSRC_MMIO
@@ -683,6 +678,7 @@ config ARCH_S3C2410
select GENERIC_GPIO
select ARCH_HAS_CPUFREQ
select HAVE_CLK
+ select CLKDEV_LOOKUP
select ARCH_USES_GETTIMEOFFSET
select HAVE_S3C2410_I2C if I2C
help
@@ -700,6 +696,7 @@ config ARCH_S3C64XX
select CPU_V6
select ARM_VIC
select HAVE_CLK
+ select CLKDEV_LOOKUP
select NO_IOPORT
select ARCH_USES_GETTIMEOFFSET
select ARCH_HAS_CPUFREQ
@@ -724,6 +721,8 @@ config ARCH_S5P64X0
select CPU_V6
select GENERIC_GPIO
select HAVE_CLK
+ select CLKDEV_LOOKUP
+ select CLKSRC_MMIO
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select GENERIC_CLOCKEVENTS
select HAVE_SCHED_CLOCK
@@ -737,6 +736,7 @@ config ARCH_S5PC100
bool "Samsung S5PC100"
select GENERIC_GPIO
select HAVE_CLK
+ select CLKDEV_LOOKUP
select CPU_V7
select ARM_L1_CACHE_SHIFT_6
select ARCH_USES_GETTIMEOFFSET
@@ -752,6 +752,8 @@ config ARCH_S5PV210
select ARCH_SPARSEMEM_ENABLE
select GENERIC_GPIO
select HAVE_CLK
+ select CLKDEV_LOOKUP
+ select CLKSRC_MMIO
select ARM_L1_CACHE_SHIFT_6
select ARCH_HAS_CPUFREQ
select GENERIC_CLOCKEVENTS
@@ -768,6 +770,7 @@ config ARCH_EXYNOS4
select ARCH_SPARSEMEM_ENABLE
select GENERIC_GPIO
select HAVE_CLK
+ select CLKDEV_LOOKUP
select ARCH_HAS_CPUFREQ
select GENERIC_CLOCKEVENTS
select HAVE_S3C_RTC if RTC_CLASS
@@ -853,6 +856,7 @@ config ARCH_OMAP
select HAVE_CLK
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_CPUFREQ
+ select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select HAVE_SCHED_CLOCK
select ARCH_HAS_HOLES_MEMORYMODEL
@@ -925,8 +929,6 @@ source "arch/arm/mach-kirkwood/Kconfig"
source "arch/arm/mach-ks8695/Kconfig"
-source "arch/arm/mach-loki/Kconfig"
-
source "arch/arm/mach-lpc32xx/Kconfig"
source "arch/arm/mach-msm/Kconfig"
@@ -970,7 +972,6 @@ source "arch/arm/plat-spear/Kconfig"
source "arch/arm/plat-tcc/Kconfig"
if ARCH_S3C2410
-source "arch/arm/mach-s3c2400/Kconfig"
source "arch/arm/mach-s3c2410/Kconfig"
source "arch/arm/mach-s3c2412/Kconfig"
source "arch/arm/mach-s3c2416/Kconfig"
@@ -1347,7 +1348,6 @@ config SMP_ON_UP
config HAVE_ARM_SCU
bool
- depends on SMP
help
This option enables support for the ARM system coherency unit
@@ -1716,17 +1716,34 @@ config ZBOOT_ROM
Say Y here if you intend to execute your compressed kernel image
(zImage) directly from ROM or flash. If unsure, say N.
+choice
+ prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
+ depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+ default ZBOOT_ROM_NONE
+ help
+ Include experimental SD/MMC loading code in the ROM-able zImage.
+ With this enabled it is possible to write the ROM-able zImage
+ kernel image to an MMC or SD card and boot the kernel straight
+ from the reset vector. At reset the processor Mask ROM will load
+ the first part of the ROM-able zImage which in turn loads the
+ rest of the kernel image to RAM.
+
+config ZBOOT_ROM_NONE
+ bool "No SD/MMC loader in zImage (EXPERIMENTAL)"
+ help
+ Do not load image from SD or MMC
+
config ZBOOT_ROM_MMCIF
bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
- depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
help
- Say Y here to include experimental MMCIF loading code in the
- ROM-able zImage. With this enabled it is possible to write the
- the ROM-able zImage kernel image to an MMC card and boot the
- kernel straight from the reset vector. At reset the processor
- Mask ROM will load the first part of the the ROM-able zImage
- which in turn loads the rest the kernel image to RAM using the
- MMCIF hardware block.
+ Load image from MMCIF hardware block.
+
+config ZBOOT_ROM_SH_MOBILE_SDHI
+ bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)"
+ help
+ Load image from SDHI hardware block
+
+endchoice
config CMDLINE
string "Default kernel command string"
@@ -1876,10 +1893,6 @@ config CPU_FREQ_PXA
default y
select CPU_FREQ_DEFAULT_GOV_USERSPACE
-config CPU_FREQ_S3C64XX
- bool "CPUfreq support for Samsung S3C64XX CPUs"
- depends on CPU_FREQ && CPU_S3C6410
-
config CPU_FREQ_S3C
bool
help
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index f5b2b390c8f2..206c34ecb9e3 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -150,7 +150,6 @@ machine-$(CONFIG_ARCH_IXP23XX) := ixp23xx
machine-$(CONFIG_ARCH_IXP4XX) := ixp4xx
machine-$(CONFIG_ARCH_KIRKWOOD) := kirkwood
machine-$(CONFIG_ARCH_KS8695) := ks8695
-machine-$(CONFIG_ARCH_LOKI) := loki
machine-$(CONFIG_ARCH_LPC32XX) := lpc32xx
machine-$(CONFIG_ARCH_MMP) := mmp
machine-$(CONFIG_ARCH_MSM) := msm
@@ -172,8 +171,7 @@ machine-$(CONFIG_ARCH_PNX4008) := pnx4008
machine-$(CONFIG_ARCH_PXA) := pxa
machine-$(CONFIG_ARCH_REALVIEW) := realview
machine-$(CONFIG_ARCH_RPC) := rpc
-machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2416 s3c2440 s3c2443
-machine-$(CONFIG_ARCH_S3C24A0) := s3c24a0
+machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2412 s3c2416 s3c2440 s3c2443
machine-$(CONFIG_ARCH_S3C64XX) := s3c64xx
machine-$(CONFIG_ARCH_S5P64X0) := s5p64x0
machine-$(CONFIG_ARCH_S5PC100) := s5pc100
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 23aad0722303..0c74a6fab952 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -6,13 +6,19 @@
OBJS =
-# Ensure that mmcif loader code appears early in the image
+# Ensure that MMCIF loader code appears early in the image
# to minimise the number of blocks that have to be read in
# order to load it.
ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
-ifeq ($(CONFIG_ARCH_SH7372),y)
OBJS += mmcif-sh7372.o
endif
+
+# Ensure that SDHI loader code appears early in the image
+# to minimise the number of blocks that have to be read in
+# order to load it.
+ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y)
+OBJS += sdhi-shmobile.o
+OBJS += sdhi-sh7372.o
endif
AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S
index c943d2e7da9d..fe3719b516fd 100644
--- a/arch/arm/boot/compressed/head-shmobile.S
+++ b/arch/arm/boot/compressed/head-shmobile.S
@@ -25,14 +25,14 @@
/* load board-specific initialization code */
#include <mach/zboot.h>
-#ifdef CONFIG_ZBOOT_ROM_MMCIF
- /* Load image from MMC */
- adr sp, __tmp_stack + 128
+#if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI)
+ /* Load image from MMC/SD */
+ adr sp, __tmp_stack + 256
ldr r0, __image_start
ldr r1, __image_end
subs r1, r1, r0
ldr r0, __load_base
- bl mmcif_loader
+ bl mmc_loader
/* Jump to loaded code */
ldr r0, __loaded
@@ -51,9 +51,9 @@ __loaded:
.long __continue
.align
__tmp_stack:
- .space 128
+ .space 256
__continue:
-#endif /* CONFIG_ZBOOT_ROM_MMCIF */
+#endif /* CONFIG_ZBOOT_ROM_MMCIF || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */
b 1f
__atags:@ tag #1
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 940b20178107..e95a5989602a 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -353,7 +353,8 @@ not_relocated: mov r0, #0
mov r0, #0 @ must be zero
mov r1, r7 @ restore architecture number
mov r2, r8 @ restore atags pointer
- mov pc, r4 @ call kernel
+ ARM( mov pc, r4 ) @ call kernel
+ THUMB( bx r4 ) @ entry point is always ARM
.align 2
.type LC0, #object
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c
index 7453c8337b83..b6f61d9a5a1b 100644
--- a/arch/arm/boot/compressed/mmcif-sh7372.c
+++ b/arch/arm/boot/compressed/mmcif-sh7372.c
@@ -40,7 +40,7 @@
* to an MMC card
* # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
*/
-asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
+asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
{
mmc_init_progress();
mmc_update_progress(MMC_PROGRESS_ENTER);
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c
new file mode 100644
index 000000000000..d403a8b24d7f
--- /dev/null
+++ b/arch/arm/boot/compressed/sdhi-sh7372.c
@@ -0,0 +1,95 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Kuninori Morimoto
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Parts inspired by u-boot
+ */
+
+#include <linux/io.h>
+#include <mach/mmc.h>
+#include <linux/mmc/boot.h>
+#include <linux/mmc/tmio.h>
+
+#include "sdhi-shmobile.h"
+
+#define PORT179CR 0xe60520b3
+#define PORT180CR 0xe60520b4
+#define PORT181CR 0xe60520b5
+#define PORT182CR 0xe60520b6
+#define PORT183CR 0xe60520b7
+#define PORT184CR 0xe60520b8
+
+#define SMSTPCR3 0xe615013c
+
+#define CR_INPUT_ENABLE 0x10
+#define CR_FUNCTION1 0x01
+
+#define SDHI1_BASE (void __iomem *)0xe6860000
+#define SDHI_BASE SDHI1_BASE
+
+/* SuperH Mobile SDHI loader
+ *
+ * loads the zImage from an SD card starting from block 0
+ * on physical partition 1
+ *
+ * The image must start with a vrl4 header and
+ * the zImage must start at offset 512 of the image. That is,
+ * at block 1 (=byte 512) of physical partition 1
+ *
+ * Use the following line to write the vrl4 formatted zImage
+ * to an SD card
+ * # dd if=vrl4.out of=/dev/sdx bs=512
+ */
+asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
+{
+ int high_capacity;
+
+ mmc_init_progress();
+
+ mmc_update_progress(MMC_PROGRESS_ENTER);
+ /* Initialise SDHI1 */
+ /* PORT184CR: GPIO_FN_SDHICMD1 Control */
+ __raw_writeb(CR_FUNCTION1, PORT184CR);
+ /* PORT179CR: GPIO_FN_SDHICLK1 Control */
+ __raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR);
+ /* PORT181CR: GPIO_FN_SDHID1_3 Control */
+ __raw_writeb(CR_FUNCTION1, PORT183CR);
+ /* PORT182CR: GPIO_FN_SDHID1_2 Control */
+ __raw_writeb(CR_FUNCTION1, PORT182CR);
+ /* PORT183CR: GPIO_FN_SDHID1_1 Control */
+ __raw_writeb(CR_FUNCTION1, PORT181CR);
+ /* PORT180CR: GPIO_FN_SDHID1_0 Control */
+ __raw_writeb(CR_FUNCTION1, PORT180CR);
+
+ /* Enable clock to SDHI1 hardware block */
+ __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3);
+
+ /* setup SDHI hardware */
+ mmc_update_progress(MMC_PROGRESS_INIT);
+ high_capacity = sdhi_boot_init(SDHI_BASE);
+ if (high_capacity < 0)
+ goto err;
+
+ mmc_update_progress(MMC_PROGRESS_LOAD);
+ /* load kernel */
+ if (sdhi_boot_do_read(SDHI_BASE, high_capacity,
+ 0, /* Kernel is at block 1 */
+ (len + TMIO_BBS - 1) / TMIO_BBS, buf))
+ goto err;
+
+ /* Disable clock to SDHI1 hardware block */
+ __raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3);
+
+ mmc_update_progress(MMC_PROGRESS_DONE);
+
+ return;
+err:
+ for(;;);
+}
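Two details of the loader above are worth spelling out: sdhi_boot_init() reports whether the card is high-capacity, and the image length is rounded up to whole TMIO_BBS-byte blocks before sdhi_boot_do_read() is called. A small illustrative sketch, not part of the patch; the helper names here are made up, while TMIO_BBS and the addressing rule come from the code in this series:

/* Illustrative only: TMIO_BBS (512) is the boot block size from
 * <linux/mmc/tmio.h> used by the loader above. */
static unsigned long zimage_blocks(unsigned long len)
{
	/* round the image length up to whole blocks */
	return (len + TMIO_BBS - 1) / TMIO_BBS;
}

static unsigned long cmd17_arg(int high_capacity, unsigned long block)
{
	/* standard-capacity SD cards take a byte address in CMD17,
	 * high-capacity (SDHC) cards take a block address; this is why
	 * sdhi_boot_do_read_single() below multiplies by TMIO_BBS only
	 * when high_capacity is false */
	return high_capacity ? block : block * TMIO_BBS;
}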
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.c b/arch/arm/boot/compressed/sdhi-shmobile.c
new file mode 100644
index 000000000000..bd3d46980955
--- /dev/null
+++ b/arch/arm/boot/compressed/sdhi-shmobile.c
@@ -0,0 +1,449 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Kuninori Morimoto
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Parts inspired by u-boot
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/tmio.h>
+#include <mach/sdhi.h>
+
+#define OCR_FASTBOOT (1<<29)
+#define OCR_HCS (1<<30)
+#define OCR_BUSY (1<<31)
+
+#define RESP_CMD12 0x00000030
+
+static inline u16 sd_ctrl_read16(void __iomem *base, int addr)
+{
+ return __raw_readw(base + addr);
+}
+
+static inline u32 sd_ctrl_read32(void __iomem *base, int addr)
+{
+ return __raw_readw(base + addr) |
+ __raw_readw(base + addr + 2) << 16;
+}
+
+static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val)
+{
+ __raw_writew(val, base + addr);
+}
+
+static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val)
+{
+ __raw_writew(val, base + addr);
+ __raw_writew(val >> 16, base + addr + 2);
+}
+
+#define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL | \
+ TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT | \
+ TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN | \
+ TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS | \
+ TMIO_STAT_ILL_FUNC)
+
+static int sdhi_intr(void __iomem *base)
+{
+ unsigned long state = sd_ctrl_read32(base, CTL_STATUS);
+
+ if (state & ALL_ERROR) {
+ sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR);
+ sd_ctrl_write32(base, CTL_IRQ_MASK,
+ ALL_ERROR |
+ sd_ctrl_read32(base, CTL_IRQ_MASK));
+ return -EINVAL;
+ }
+ if (state & TMIO_STAT_CMDRESPEND) {
+ sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
+ sd_ctrl_write32(base, CTL_IRQ_MASK,
+ TMIO_STAT_CMDRESPEND |
+ sd_ctrl_read32(base, CTL_IRQ_MASK));
+ return 0;
+ }
+ if (state & TMIO_STAT_RXRDY) {
+ sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY);
+ sd_ctrl_write32(base, CTL_IRQ_MASK,
+ TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN |
+ sd_ctrl_read32(base, CTL_IRQ_MASK));
+ return 0;
+ }
+ if (state & TMIO_STAT_DATAEND) {
+ sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND);
+ sd_ctrl_write32(base, CTL_IRQ_MASK,
+ TMIO_STAT_DATAEND |
+ sd_ctrl_read32(base, CTL_IRQ_MASK));
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
+static int sdhi_boot_wait_resp_end(void __iomem *base)
+{
+ int err = -EAGAIN, timeout = 10000000;
+
+ while (timeout--) {
+ err = sdhi_intr(base);
+ if (err != -EAGAIN)
+ break;
+ udelay(1);
+ }
+
+ return err;
+}
+
+/* SDHI_CLK_CTRL */
+#define CLK_MMC_ENABLE (1 << 8)
+#define CLK_MMC_INIT (1 << 6) /* clk / 256 */
+
+static void sdhi_boot_mmc_clk_stop(void __iomem *base)
+{
+ sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000);
+ msleep(10);
+ sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE &
+ sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
+ msleep(10);
+}
+
+static void sdhi_boot_mmc_clk_start(void __iomem *base)
+{
+ sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE |
+ sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
+ msleep(10);
+ sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE);
+ msleep(10);
+}
+
+static void sdhi_boot_reset(void __iomem *base)
+{
+ sd_ctrl_write16(base, CTL_RESET_SD, 0x0000);
+ msleep(10);
+ sd_ctrl_write16(base, CTL_RESET_SD, 0x0001);
+ msleep(10);
+}
+
+/* Set MMC clock / power.
+ * Note: This controller uses a simple divider scheme, therefore it cannot
+ * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
+ * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
+ * slowest setting.
+ */
+static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios)
+{
+ if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY)
+ return -EBUSY;
+
+ if (ios->clock)
+ sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL,
+ ios->clock | CLK_MMC_ENABLE);
+
+ /* Power sequence - OFF -> ON -> UP */
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF: /* power down SD bus */
+ sdhi_boot_mmc_clk_stop(base);
+ break;
+ case MMC_POWER_ON: /* power up SD bus */
+ break;
+ case MMC_POWER_UP: /* start bus clock */
+ sdhi_boot_mmc_clk_start(base);
+ break;
+ }
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0);
+ break;
+ case MMC_BUS_WIDTH_4:
+ sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0);
+ break;
+ }
+
+	/* Let things settle. Delay taken from the WinCE driver */
+ udelay(140);
+
+ return 0;
+}
+
+/* These are the bitmasks the tmio chip requires to implement the MMC response
+ * types. Note that R1 and R6 are the same in this scheme. */
+#define RESP_NONE 0x0300
+#define RESP_R1 0x0400
+#define RESP_R1B 0x0500
+#define RESP_R2 0x0600
+#define RESP_R3 0x0700
+#define DATA_PRESENT 0x0800
+#define TRANSFER_READ 0x1000
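+
+/*
+ * For illustration: with these masks, the CMD17 issued by
+ * sdhi_boot_do_read_single() below is written to CTL_SD_CMD as
+ * MMC_READ_SINGLE_BLOCK | TRANSFER_READ | DATA_PRESENT | RESP_R1,
+ * i.e. opcode 17 with 0x1c00 OR'd in.
+ */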
+
+static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd)
+{
+ int err, c = cmd->opcode;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE: c |= RESP_NONE; break;
+ case MMC_RSP_R1: c |= RESP_R1; break;
+ case MMC_RSP_R1B: c |= RESP_R1B; break;
+ case MMC_RSP_R2: c |= RESP_R2; break;
+ case MMC_RSP_R3: c |= RESP_R3; break;
+ default:
+ return -EINVAL;
+ }
+
+	/* No interrupts are taken, so this flag may not have been cleared yet */
+ sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
+
+ sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND |
+ sd_ctrl_read32(base, CTL_IRQ_MASK));
+ sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg);
+ sd_ctrl_write16(base, CTL_SD_CMD, c);
+
+ sd_ctrl_write32(base, CTL_IRQ_MASK,
+ ~(TMIO_STAT_CMDRESPEND | ALL_ERROR) &
+ sd_ctrl_read32(base, CTL_IRQ_MASK));
+
+ err = sdhi_boot_wait_resp_end(base);
+ if (err)
+ return err;
+
+ cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE);
+
+ return 0;
+}
+
+static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity,
+ unsigned long block, unsigned short *buf)
+{
+ int err, i;
+
+ /* CMD17 - Read */
+ {
+ struct mmc_command cmd;
+
+		cmd.opcode = MMC_READ_SINGLE_BLOCK |
+			     TRANSFER_READ | DATA_PRESENT;
+ if (high_capacity)
+ cmd.arg = block;
+ else
+ cmd.arg = block * TMIO_BBS;
+ cmd.flags = MMC_RSP_R1;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ return err;
+ }
+
+ sd_ctrl_write32(base, CTL_IRQ_MASK,
+ ~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY |
+ TMIO_STAT_TXUNDERRUN) &
+ sd_ctrl_read32(base, CTL_IRQ_MASK));
+ err = sdhi_boot_wait_resp_end(base);
+ if (err)
+ return err;
+
+ sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS);
+ for (i = 0; i < TMIO_BBS / sizeof(*buf); i++)
+ *buf++ = sd_ctrl_read16(base, RESP_CMD12);
+
+ err = sdhi_boot_wait_resp_end(base);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int sdhi_boot_do_read(void __iomem *base, int high_capacity,
+ unsigned long offset, unsigned short count,
+ unsigned short *buf)
+{
+ unsigned long i;
+ int err = 0;
+
+ for (i = 0; i < count; i++) {
+ err = sdhi_boot_do_read_single(base, high_capacity, offset + i,
+ buf + (i * TMIO_BBS /
+ sizeof(*buf)));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+#define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34)
+
+int sdhi_boot_init(void __iomem *base)
+{
+ bool sd_v2 = false, sd_v1_0 = false;
+ unsigned short cid;
+ int err, high_capacity = 0;
+
+ sdhi_boot_mmc_clk_stop(base);
+ sdhi_boot_reset(base);
+
+ /* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */
+ {
+ struct mmc_ios ios;
+ ios.power_mode = MMC_POWER_ON;
+ ios.bus_width = MMC_BUS_WIDTH_1;
+ ios.clock = CLK_MMC_INIT;
+ err = sdhi_boot_mmc_set_ios(base, &ios);
+ if (err)
+ return err;
+ }
+
+ /* CMD0 */
+ {
+ struct mmc_command cmd;
+ msleep(1);
+ cmd.opcode = MMC_GO_IDLE_STATE;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_NONE;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ return err;
+ msleep(2);
+ }
+
+ /* CMD8 - Test for SD version 2 */
+ {
+ struct mmc_command cmd;
+ cmd.opcode = SD_SEND_IF_COND;
+ cmd.arg = (VOLTAGES != 0) << 8 | 0xaa;
+ cmd.flags = MMC_RSP_R1;
+ err = sdhi_boot_request(base, &cmd); /* Ignore error */
+ if ((cmd.resp[0] & 0xff) == 0xaa)
+ sd_v2 = true;
+ }
+
+ /* CMD55 - Get OCR (SD) */
+ {
+ int timeout = 1000;
+ struct mmc_command cmd;
+
+ cmd.arg = 0;
+
+ do {
+ cmd.opcode = MMC_APP_CMD;
+ cmd.flags = MMC_RSP_R1;
+ cmd.arg = 0;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ break;
+
+ cmd.opcode = SD_APP_OP_COND;
+ cmd.flags = MMC_RSP_R3;
+ cmd.arg = (VOLTAGES & 0xff8000);
+ if (sd_v2)
+ cmd.arg |= OCR_HCS;
+ cmd.arg |= OCR_FASTBOOT;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ break;
+
+ msleep(1);
+		} while (!(cmd.resp[0] & OCR_BUSY) && --timeout);
+
+ if (!err && timeout) {
+ if (!sd_v2)
+ sd_v1_0 = true;
+ high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
+ }
+ }
+
+ /* CMD1 - Get OCR (MMC) */
+ if (!sd_v2 && !sd_v1_0) {
+ int timeout = 1000;
+ struct mmc_command cmd;
+
+ do {
+ cmd.opcode = MMC_SEND_OP_COND;
+ cmd.arg = VOLTAGES | OCR_HCS;
+ cmd.flags = MMC_RSP_R3;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ return err;
+
+ msleep(1);
+		} while (!(cmd.resp[0] & OCR_BUSY) && --timeout);
+
+ if (!timeout)
+ return -EAGAIN;
+
+ high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
+ }
+
+ /* CMD2 - Get CID */
+ {
+ struct mmc_command cmd;
+ cmd.opcode = MMC_ALL_SEND_CID;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R2;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ return err;
+ }
+
+ /* CMD3
+ * MMC: Set the relative address
+ * SD: Get the relative address
+ * Also puts the card into the standby state
+ */
+ {
+ struct mmc_command cmd;
+ cmd.opcode = MMC_SET_RELATIVE_ADDR;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ return err;
+ cid = cmd.resp[0] >> 16;
+ }
+
+ /* CMD9 - Get CSD */
+ {
+ struct mmc_command cmd;
+ cmd.opcode = MMC_SEND_CSD;
+ cmd.arg = cid << 16;
+ cmd.flags = MMC_RSP_R2;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ return err;
+ }
+
+ /* CMD7 - Select the card */
+ {
+ struct mmc_command cmd;
+ cmd.opcode = MMC_SELECT_CARD;
+ //cmd.arg = rca << 16;
+ cmd.arg = cid << 16;
+ //cmd.flags = MMC_RSP_R1B;
+ cmd.flags = MMC_RSP_R1;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ return err;
+ }
+
+ /* CMD16 - Set the block size */
+ {
+ struct mmc_command cmd;
+ cmd.opcode = MMC_SET_BLOCKLEN;
+ cmd.arg = TMIO_BBS;
+ cmd.flags = MMC_RSP_R1;
+ err = sdhi_boot_request(base, &cmd);
+ if (err)
+ return err;
+ }
+
+ return high_capacity;
+}
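+
+/*
+ * Usage sketch (caller and variable names are illustrative): a boot loader
+ * brings the card up once, then reads TMIO_BBS-sized blocks:
+ *
+ *	int hc = sdhi_boot_init(base);
+ *	if (hc >= 0)
+ *		err = sdhi_boot_do_read(base, hc, first_block, nr_blocks, buf);
+ *
+ * sdhi_boot_init() returns the high-capacity flag on success; passing it
+ * back in lets sdhi_boot_do_read() scale block numbers into byte offsets
+ * for standard-capacity cards.
+ */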
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.h b/arch/arm/boot/compressed/sdhi-shmobile.h
new file mode 100644
index 000000000000..92eaa09f985e
--- /dev/null
+++ b/arch/arm/boot/compressed/sdhi-shmobile.h
@@ -0,0 +1,11 @@
+#ifndef SDHI_MOBILE_H
+#define SDHI_MOBILE_H
+
+#include <linux/compiler.h>
+
+int sdhi_boot_do_read(void __iomem *base, int high_capacity,
+ unsigned long offset, unsigned short count,
+ unsigned short *buf);
+int sdhi_boot_init(void __iomem *base);
+
+#endif
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index ea80abe78844..4e728834a1b9 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -33,20 +33,24 @@ SECTIONS
*(.text.*)
*(.fixup)
*(.gnu.warning)
+ *(.glue_7t)
+ *(.glue_7)
+ }
+ .rodata : {
*(.rodata)
*(.rodata.*)
- *(.glue_7)
- *(.glue_7t)
+ }
+ .piggydata : {
*(.piggydata)
- . = ALIGN(4);
}
+ . = ALIGN(4);
_etext = .;
+ .got.plt : { *(.got.plt) }
_got_start = .;
.got : { *(.got) }
_got_end = .;
- .got.plt : { *(.got.plt) }
_edata = .;
. = BSS_START;
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 841df7d21c2f..595ecd290ebf 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -79,6 +79,8 @@ struct dmabounce_device_info {
struct dmabounce_pool large;
rwlock_t lock;
+
+ int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};
#ifdef STATS
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
if (!dev || !dev->archdata.dmabounce)
return NULL;
if (dma_mapping_error(dev, dma_addr)) {
- if (dev)
- dev_err(dev, "Trying to %s invalid mapping\n", where);
- else
- pr_err("unknown device: Trying to %s invalid mapping\n", where);
+ dev_err(dev, "Trying to %s invalid mapping\n", where);
return NULL;
}
return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
- struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
- dma_addr_t dma_addr;
- int needs_bounce = 0;
-
- if (device_info)
- DO_STATS ( device_info->map_op_count++ );
-
- dma_addr = virt_to_dma(dev, ptr);
+ if (!dev || !dev->archdata.dmabounce)
+ return 0;
if (dev->dma_mask) {
- unsigned long mask = *dev->dma_mask;
- unsigned long limit;
+ unsigned long limit, mask = *dev->dma_mask;
limit = (mask + 1) & ~mask;
if (limit && size > limit) {
dev_err(dev, "DMA mapping too big (requested %#x "
"mask %#Lx)\n", size, *dev->dma_mask);
- return ~0;
+ return -E2BIG;
}
- /*
- * Figure out if we need to bounce from the DMA mask.
- */
- needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+ /* Figure out if we need to bounce from the DMA mask. */
+ if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+ return 1;
}
- if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
- struct safe_buffer *buf;
+ return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}
- buf = alloc_safe_buffer(device_info, ptr, size, dir);
- if (buf == 0) {
- dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
- __func__, ptr);
- return ~0;
- }
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+ struct safe_buffer *buf;
- dev_dbg(dev,
- "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
- __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
- buf->safe, buf->safe_dma_addr);
+ if (device_info)
+ DO_STATS ( device_info->map_op_count++ );
- if ((dir == DMA_TO_DEVICE) ||
- (dir == DMA_BIDIRECTIONAL)) {
- dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
- __func__, ptr, buf->safe, size);
- memcpy(buf->safe, ptr, size);
- }
- ptr = buf->safe;
+ buf = alloc_safe_buffer(device_info, ptr, size, dir);
+ if (buf == NULL) {
+ dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+ __func__, ptr);
+ return ~0;
+ }
- dma_addr = buf->safe_dma_addr;
- } else {
- /*
- * We don't need to sync the DMA buffer since
- * it was allocated via the coherent allocators.
- */
- __dma_single_cpu_to_dev(ptr, size, dir);
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
+
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+ __func__, ptr, buf->safe, size);
+ memcpy(buf->safe, ptr, size);
}
- return dma_addr;
+ return buf->safe_dma_addr;
}
-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
size_t size, enum dma_data_direction dir)
{
- struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
-
- if (buf) {
- BUG_ON(buf->size != size);
- BUG_ON(buf->direction != dir);
+ BUG_ON(buf->size != size);
+ BUG_ON(buf->direction != dir);
- dev_dbg(dev,
- "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
- __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
- buf->safe, buf->safe_dma_addr);
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
- DO_STATS(dev->archdata.dmabounce->bounce_count++);
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
- void *ptr = buf->ptr;
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ void *ptr = buf->ptr;
- dev_dbg(dev,
- "%s: copy back safe %p to unsafe %p size %d\n",
- __func__, buf->safe, ptr, size);
- memcpy(ptr, buf->safe, size);
+ dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+ __func__, buf->safe, ptr, size);
+ memcpy(ptr, buf->safe, size);
- /*
- * Since we may have written to a page cache page,
- * we need to ensure that the data will be coherent
- * with user mappings.
- */
- __cpuc_flush_dcache_area(ptr, size);
- }
- free_safe_buffer(dev->archdata.dmabounce, buf);
- } else {
- __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+ /*
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
+ */
+ __cpuc_flush_dcache_area(ptr, size);
}
+ free_safe_buffer(dev->archdata.dmabounce, buf);
}
/* ************************************************** */
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
-dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
-{
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, ptr, size, dir);
-
- BUG_ON(!valid_dma_direction(dir));
-
- return map_single(dev, ptr, size, dir);
-}
-EXPORT_SYMBOL(__dma_map_single);
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer. (basically return things back to the way they
- * should be)
- */
-void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
-{
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, (void *) dma_addr, size, dir);
-
- unmap_single(dev, dma_addr, size, dir);
-}
-EXPORT_SYMBOL(__dma_unmap_single);
-
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
+ dma_addr_t dma_addr;
+ int ret;
+
dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
__func__, page, offset, size, dir);
- BUG_ON(!valid_dma_direction(dir));
+ dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+ ret = needs_bounce(dev, dma_addr, size);
+ if (ret < 0)
+ return ~0;
+
+ if (ret == 0) {
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ return dma_addr;
+ }
if (PageHighMem(page)) {
- dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
- "is not supported\n");
+ dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
return ~0;
}
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page);
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
- dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
- __func__, (void *) dma_addr, size, dir);
+ struct safe_buffer *buf;
+
+ dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+ __func__, dma_addr, size, dir);
+
+ buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+ if (!buf) {
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+ dma_addr & ~PAGE_MASK, size, dir);
+ return;
+ }
- unmap_single(dev, dma_addr, size, dir);
+ unmap_single(dev, buf, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
}
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
- unsigned long large_buffer_size)
+ unsigned long large_buffer_size,
+ int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
struct dmabounce_device_info *device_info;
int ret;
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
device_info->dev = dev;
INIT_LIST_HEAD(&device_info->safe_buffers);
rwlock_init(&device_info->lock);
+ device_info->needs_bounce = needs_bounce_fn;
#ifdef STATS
device_info->total_allocs = 0;
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 4ddd0a6ac7ff..7bdd91766d65 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -179,22 +179,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
{
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
unsigned int shift = (d->irq % 4) * 8;
- unsigned int cpu = cpumask_first(mask_val);
+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
u32 val, mask, bit;
- if (cpu >= 8)
+ if (cpu >= 8 || cpu >= nr_cpu_ids)
return -EINVAL;
mask = 0xff << shift;
bit = 1 << (cpu + shift);
spin_lock(&irq_controller_lock);
- d->node = cpu;
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
spin_unlock(&irq_controller_lock);
- return 0;
+ return IRQ_SET_MASK_OK;
}
#endif
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 7a21927c52e1..14ad62e16dd1 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -243,6 +243,12 @@ static struct resource it8152_mem = {
* ITE8152 chip can address up to 64MByte, so all the devices
* connected to ITE8152 (PCI and USB) should have limited DMA window
*/
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+ dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+ __func__, dma_addr, size);
+ return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}
/*
* Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev)
if (dev->dma_mask)
*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
- dmabounce_register_dev(dev, 2048, 4096);
+ dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
}
return 0;
}
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
return 0;
}
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
- dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
- __func__, dma_addr, size);
- return (dev->bus == &pci_bus_type) &&
- ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (mask >= PHYS_OFFSET + SZ_64M - 1)
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 9c49a46a2b7a..0569de6acfba 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
}
+#endif
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller. If
+ * an access is made to a region of memory above 1MB relative to the bank
+ * base, it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+ /*
+ * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+ * User's Guide" mentions that jumpers R51 and R52 control the
+ * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+ * SDRAM bank 1 on Neponset). The default configuration selects
+ * Assabet, so any address in bank 1 is necessarily invalid.
+ */
+ return (machine_is_assabet() || machine_is_pfs168()) &&
+ (addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
#endif
static void sa1111_dev_release(struct device *_dev)
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
dev->dev.dma_mask = &dev->dma_mask;
if (dev->dma_mask != 0xffffffffUL) {
- ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+ ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+ sa1111_needs_bounce);
if (ret) {
dev_err(&dev->dev, "SA1111: Failed to register"
" with dmabounce\n");
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip)
kfree(sachip);
}
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller. If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted. Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
- /*
- * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
- * User's Guide" mentions that jumpers R51 and R52 control the
- * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
- * SDRAM bank 1 on Neponset). The default configuration selects
- * Assabet, so any address in bank 1 is necessarily invalid.
- */
- return ((machine_is_assabet() || machine_is_pfs168()) &&
- (addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
-}
-
struct sa1111_save_data {
unsigned int skcr;
unsigned int skpcr;
diff --git a/arch/arm/configs/cm_x300_defconfig b/arch/arm/configs/cm_x300_defconfig
index 921e56a7572c..f4b767256f95 100644
--- a/arch/arm/configs/cm_x300_defconfig
+++ b/arch/arm/configs/cm_x300_defconfig
@@ -5,7 +5,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_MODULES=y
@@ -13,6 +12,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_PXA=y
+CONFIG_GPIO_PCA953X=y
CONFIG_MACH_CM_X300=y
CONFIG_NO_HZ=y
CONFIG_AEABI=y
@@ -23,7 +23,6 @@ CONFIG_CMDLINE="root=/dev/mtdblock5 rootfstype=ubifs console=ttyS2,38400"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
CONFIG_APM_EMULATION=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -40,8 +39,8 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -60,7 +59,6 @@ CONFIG_MTD_NAND_PXA3xx=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
@@ -81,16 +79,15 @@ CONFIG_TOUCHSCREEN_WM97XX=m
# CONFIG_TOUCHSCREEN_WM9705 is not set
# CONFIG_TOUCHSCREEN_WM9713 is not set
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_PXA=y
CONFIG_SERIAL_PXA_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_PXA=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_PCA953X=y
# CONFIG_HWMON is not set
CONFIG_PMIC_DA903X=y
CONFIG_REGULATOR=y
@@ -102,7 +99,6 @@ CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_TDO24M=y
# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_BACKLIGHT_DA903X=m
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_FONTS=y
@@ -131,7 +127,6 @@ CONFIG_HID_GREENASIA=y
CONFIG_HID_SMARTJOYPLUS=y
CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
-CONFIG_HID_WACOM=m
CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
@@ -152,7 +147,6 @@ CONFIG_RTC_DRV_PXA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_TMPFS=y
@@ -164,7 +158,6 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_SMB_FS=m
CONFIG_CIFS=m
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_PARTITION_ADVANCED=y
@@ -172,9 +165,7 @@ CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
# CONFIG_SCHED_DEBUG is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
@@ -182,7 +173,6 @@ CONFIG_DEBUG_LL=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=y
diff --git a/arch/arm/configs/loki_defconfig b/arch/arm/configs/loki_defconfig
deleted file mode 100644
index 1ba752b2dc6d..000000000000
--- a/arch/arm/configs/loki_defconfig
+++ /dev/null
@@ -1,120 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_LOKI=y
-CONFIG_MACH_LB88RC8480=y
-# CONFIG_CPU_FEROCEON_OLD_ID is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_NET_PKTGEN=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_NFTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_GEOMETRY=y
-CONFIG_MTD_CFI_I4=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_VERIFY_WRITE=y
-CONFIG_MTD_NAND_ORION=y
-CONFIG_BLK_DEV_LOOP=y
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=m
-CONFIG_ATA=y
-CONFIG_SATA_MV=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MV643XX_ETH=y
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_LEGACY_PTY_COUNT=16
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MV64XXX=y
-CONFIG_SPI=y
-# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_PRINTER=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_NEW_LEDS=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_XFS_FS=y
-CONFIG_INOTIFY=y
-CONFIG_ISO9660_FS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_LDM_PARTITION=y
-CONFIG_LDM_DEBUG=y
-CONFIG_SUN_PARTITION=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/mx51_defconfig b/arch/arm/configs/mx51_defconfig
index 0ace16cba9b5..88c5802a2351 100644
--- a/arch/arm/configs/mx51_defconfig
+++ b/arch/arm/configs/mx51_defconfig
@@ -106,6 +106,7 @@ CONFIG_GPIO_SYSFS=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MXC=y
+CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_BLOCK=m
CONFIG_MMC_SDHCI=m
@@ -145,7 +146,7 @@ CONFIG_ROOT_NFS=y
CONFIG_NLS_DEFAULT="cp437"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 2bf224310fb4..5a6ff7c605df 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -89,7 +89,7 @@ CONFIG_DISPLAY_SUPPORT=m
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
CONFIG_MMC_MXS=y
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=m
CONFIG_DMADEVICES=y
CONFIG_MXS_DMA=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 65c3f2474f5e..29035e86a59d 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -293,4 +293,13 @@
.macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
.endm
+
+/* Utility macro for declaring string literals */
+ .macro string name:req, string
+ .type \name , #object
+\name:
+ .asciz "\string"
+ .size \name , . - \name
+ .endm
+
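+/* Example use (illustrative): string cpu_elf_name, "v7" */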
#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index b4892a06442c..f4280593dfa3 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -26,8 +26,8 @@
#include <linux/compiler.h>
#include <asm/system.h>
-#define smp_mb__before_clear_bit() mb()
-#define smp_mb__after_clear_bit() mb()
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
/*
* These functions are the basis of our bit ops.
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837363ed..7a21d0bf7134 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
___dma_page_dev_to_cpu(page, off, size, dir);
}
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- if (mask < ISA_DMA_THRESHOLD)
- return 0;
- return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
- if (dev->archdata.dmabounce) {
- if (dma_mask >= ISA_DMA_THRESHOLD)
- return 0;
- else
- return -EIO;
- }
-#endif
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
/*
* DMA errors are defined by all-bits-set in the DMA address.
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
* @dev: valid struct device pointer
* @small_buf_size: size of buffers to use with small buffer pool
* @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
*
* This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
* appropriate DMA pools for the device.
- *
*/
extern int dmabounce_register_dev(struct device *, unsigned long,
- unsigned long);
+ unsigned long, int (*)(struct device *, dma_addr_t, size_t));
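+
+/*
+ * Sketch of a needs_bounce callback for a hypothetical device that can only
+ * address the low 64MB (compare the it8152 and sa1111 callbacks):
+ *
+ *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
+ *				    size_t size)
+ *	{
+ *		return (addr + size) > SZ_64M;
+ *	}
+ *
+ *	dmabounce_register_dev(dev, 2048, 4096, foo_needs_bounce);
+ */
+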
/**
* dmabounce_unregister_dev
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
/*
* The DMA API, implemented by dmabounce.c. See below for descriptions.
*/
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
- enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
extern dma_addr_t __dma_map_page(struct device *, struct page *,
unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
}
-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction dir)
-{
- __dma_single_cpu_to_dev(cpu_addr, size, dir);
- return virt_to_dma(dev, cpu_addr);
-}
-
static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
size_t size, enum dma_data_direction dir)
{
+ unsigned long offset;
+ struct page *page;
dma_addr_t addr;
+ BUG_ON(!virt_addr_valid(cpu_addr));
+ BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
BUG_ON(!valid_dma_direction(dir));
- addr = __dma_map_single(dev, cpu_addr, size, dir);
- debug_dma_map_page(dev, virt_to_page(cpu_addr),
- (unsigned long)cpu_addr & ~PAGE_MASK, size,
- dir, addr, true);
+ page = virt_to_page(cpu_addr);
+ offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+ addr = __dma_map_page(dev, page, offset, size, dir);
+ debug_dma_map_page(dev, page, offset, size, dir, addr, true);
return addr;
}
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
debug_dma_unmap_page(dev, handle, size, dir, true);
- __dma_unmap_single(dev, handle, size, dir);
+ __dma_unmap_page(dev, handle, size, dir);
}
/**
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 42005542932b..628670e9d7c9 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -1,15 +1,16 @@
#ifndef __ASM_ARM_DMA_H
#define __ASM_ARM_DMA_H
-#include <asm/memory.h>
-
/*
* This is the maximum virtual address which can be DMA'd from.
*/
-#ifndef ARM_DMA_ZONE_SIZE
-#define MAX_DMA_ADDRESS 0xffffffff
+#ifndef CONFIG_ZONE_DMA
+#define MAX_DMA_ADDRESS 0xffffffffUL
#else
-#define MAX_DMA_ADDRESS (PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
+#define MAX_DMA_ADDRESS ({ \
+ extern unsigned long arm_dma_zone_size; \
+ arm_dma_zone_size ? \
+ (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
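+/* e.g. a 64MB DMA zone with the default 0xc0000000 PAGE_OFFSET gives 0xc4000000 */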
#endif
#ifdef CONFIG_ISA_DMA_API
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index 2da8547de6d6..2f1e2098dfe7 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -4,8 +4,8 @@
* Interrupt handling. Preserves r7, r8, r9
*/
.macro arch_irq_handler_default
- get_irqnr_preamble r5, lr
-1: get_irqnr_and_base r0, r6, r5, lr
+ get_irqnr_preamble r6, lr
+1: get_irqnr_and_base r0, r2, r6, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@@ -17,17 +17,17 @@
/*
* XXX
*
- * this macro assumes that irqstat (r6) and base (r5) are
+ * this macro assumes that irqstat (r2) and base (r6) are
* preserved from get_irqnr_and_base above
*/
- ALT_SMP(test_for_ipi r0, r6, r5, lr)
+ ALT_SMP(test_for_ipi r0, r2, r6, lr)
ALT_UP_B(9997f)
movne r1, sp
adrne lr, BSYM(1b)
bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
- test_for_ltirq r0, r6, r5, lr
+ test_for_ltirq r0, r2, r6, lr
movne r0, sp
adrne lr, BSYM(1b)
bne do_local_timer
@@ -40,7 +40,7 @@
.align 5
.global \symbol_name
\symbol_name:
- mov r4, lr
+ mov r8, lr
arch_irq_handler_default
- mov pc, r4
+ mov pc, r8
.endm
diff --git a/arch/arm/include/asm/hardware/scoop.h b/arch/arm/include/asm/hardware/scoop.h
index ebb3ceaa8fac..58cdf5d84122 100644
--- a/arch/arm/include/asm/hardware/scoop.h
+++ b/arch/arm/include/asm/hardware/scoop.h
@@ -61,7 +61,6 @@ struct scoop_pcmcia_dev {
struct scoop_pcmcia_config {
struct scoop_pcmcia_dev *devs;
int num_devs;
- void (*pcmcia_init)(void);
void (*power_ctrl)(struct device *scoop, unsigned short cpr, int nr);
};
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index c1062c317103..c93a22a8b924 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -4,22 +4,26 @@
/*
* HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
*/
-#define HWCAP_SWP 1
-#define HWCAP_HALF 2
-#define HWCAP_THUMB 4
-#define HWCAP_26BIT 8 /* Play it safe */
-#define HWCAP_FAST_MULT 16
-#define HWCAP_FPA 32
-#define HWCAP_VFP 64
-#define HWCAP_EDSP 128
-#define HWCAP_JAVA 256
-#define HWCAP_IWMMXT 512
-#define HWCAP_CRUNCH 1024
-#define HWCAP_THUMBEE 2048
-#define HWCAP_NEON 4096
-#define HWCAP_VFPv3 8192
-#define HWCAP_VFPv3D16 16384
-#define HWCAP_TLS 32768
+#define HWCAP_SWP (1 << 0)
+#define HWCAP_HALF (1 << 1)
+#define HWCAP_THUMB (1 << 2)
+#define HWCAP_26BIT (1 << 3) /* Play it safe */
+#define HWCAP_FAST_MULT (1 << 4)
+#define HWCAP_FPA (1 << 5)
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP (1 << 7)
+#define HWCAP_JAVA (1 << 8)
+#define HWCAP_IWMMXT (1 << 9)
+#define HWCAP_CRUNCH (1 << 10)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_VFPv3D16 (1 << 14)
+#define HWCAP_TLS (1 << 15)
+#define HWCAP_VFPv4 (1 << 16)
+#define HWCAP_IDIVA (1 << 17)
+#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
/*
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index e46bdd0097eb..feec86768f9c 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -24,12 +24,6 @@
#define MAX_INSN_SIZE 2
#define MAX_STACK_SIZE 64 /* 32 would probably be OK */
-/*
- * This undefined instruction must be unique and
- * reserved solely for kprobes' use.
- */
-#define KPROBE_BREAKPOINT_INSTRUCTION 0xe7f001f8
-
#define regs_return_value(regs) ((regs)->ARM_r0)
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
@@ -38,14 +32,17 @@ typedef u32 kprobe_opcode_t;
struct kprobe;
typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
-
typedef unsigned long (kprobe_check_cc)(unsigned long);
+typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *);
+typedef void (kprobe_insn_fn_t)(void);
/* Architecture specific copy of original instruction. */
struct arch_specific_insn {
- kprobe_opcode_t *insn;
- kprobe_insn_handler_t *insn_handler;
- kprobe_check_cc *insn_check_cc;
+ kprobe_opcode_t *insn;
+ kprobe_insn_handler_t *insn_handler;
+ kprobe_check_cc *insn_check_cc;
+ kprobe_insn_singlestep_t *insn_singlestep;
+ kprobe_insn_fn_t *insn_fn;
};
struct prev_kprobe {
@@ -62,20 +59,9 @@ struct kprobe_ctlblk {
};
void arch_remove_kprobe(struct kprobe *);
-void kretprobe_trampoline(void);
-
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-enum kprobe_insn {
- INSN_REJECTED,
- INSN_GOOD,
- INSN_GOOD_NO_SLOT
-};
-
-enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
- struct arch_specific_insn *);
-void __init arm_kprobe_decode_init(void);
#endif /* _ARM_KPROBES_H */
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 946f4d778f71..3281fb4b12e3 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -23,6 +23,10 @@ struct machine_desc {
unsigned int nr_irqs; /* number of IRQs */
+#ifdef CONFIG_ZONE_DMA
+ unsigned long dma_zone_size; /* size of DMA-able area */
+#endif
+
unsigned int video_start; /* start of video RAM */
unsigned int video_end; /* end of video RAM */
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index af44a8fb3480..b8de516e600e 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -204,18 +204,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#endif
/*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA. The default here places no restriction on DMA
- * allocations. This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
-#ifndef ARM_DMA_ZONE_SIZE
-#define ISA_DMA_THRESHOLD (0xffffffffULL)
-#else
-#define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
-#endif
-
-/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index c4aa4e8c6af9..0f8e3827a89b 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -24,6 +24,8 @@ enum arm_perf_pmu_ids {
ARM_PERF_PMU_ID_V6MP,
ARM_PERF_PMU_ID_CA8,
ARM_PERF_PMU_ID_CA9,
+ ARM_PERF_PMU_ID_CA5,
+ ARM_PERF_PMU_ID_CA15,
ARM_NUM_PMU_IDS,
};
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 7544ce6b481a..67c70a31a1be 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -52,7 +52,7 @@ reserve_pmu(enum arm_pmu_type device);
* a cookie.
*/
extern int
-release_pmu(struct platform_device *pdev);
+release_pmu(enum arm_pmu_type type);
/**
* init_pmu() - Initialise the PMU.
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 8ec535e11fd7..633d1cb84d87 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -82,13 +82,13 @@ extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
#else
-#define cpu_proc_init() processor._proc_init()
-#define cpu_proc_fin() processor._proc_fin()
-#define cpu_reset(addr) processor.reset(addr)
-#define cpu_do_idle() processor._do_idle()
-#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
-#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
-#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
+#define cpu_proc_init processor._proc_init
+#define cpu_proc_fin processor._proc_fin
+#define cpu_reset processor.reset
+#define cpu_do_idle processor._do_idle
+#define cpu_dcache_clean_area processor.dcache_clean_area
+#define cpu_set_pte_ext processor.set_pte_ext
+#define cpu_do_switch_mm processor.switch_mm
#endif
extern void cpu_resume(void);
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 312d10877bd7..96187ff58c24 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -69,8 +69,9 @@
#define PSR_c 0x000000ff /* Control */
/*
- * ARMv7 groups of APSR bits
+ * ARMv7 groups of PSR bits
*/
+#define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
#define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
#define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
@@ -200,6 +201,14 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define PREDICATE_ALWAYS 0xe0000000
/*
+ * True if instr is a 32-bit thumb instruction. This works if instr
+ * is the first or only half-word of a thumb instruction. It also works
+ * when instr holds all 32-bits of a wide thumb instruction if stored
+ * in the form (first_half<<16)|(second_half)
+ */
+#define is_wide_instruction(instr) ((unsigned)(instr) >= 0xe800)
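+/* e.g. 0xe92d, the first half of a 32-bit Thumb STMDB, is wide; 0x4770 (bx lr) is not */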
+
+/*
* kprobe-based event tracer support
*/
#include <linux/stddef.h>
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
index 2f87870d9347..cefdb8f898a1 100644
--- a/arch/arm/include/asm/scatterlist.h
+++ b/arch/arm/include/asm/scatterlist.h
@@ -1,6 +1,10 @@
#ifndef _ASMARM_SCATTERLIST_H
#define _ASMARM_SCATTERLIST_H
+#ifdef CONFIG_ARM_HAS_SG_CHAIN
+#define ARCH_HAS_SG_CHAIN
+#endif
+
#include <asm/memory.h>
#include <asm/types.h>
#include <asm-generic/scatterlist.h>
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index ee2ad8ae07af..915696dd9c7c 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -187,12 +187,16 @@ struct tagtable {
#define __tag __used __attribute__((__section__(".taglist.init")))
#define __tagtable(tag, fn) \
-static struct tagtable __tagtable_##fn __tag = { tag, fn }
+static const struct tagtable __tagtable_##fn __tag = { tag, fn }
/*
* Memory map description
*/
-#define NR_BANKS 8
+#ifdef CONFIG_ARCH_EP93XX
+# define NR_BANKS 16
+#else
+# define NR_BANKS 8
+#endif
struct membank {
phys_addr_t start;
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
new file mode 100644
index 000000000000..b0e4e1a02318
--- /dev/null
+++ b/arch/arm/include/asm/suspend.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_ARM_SUSPEND_H
+#define __ASM_ARM_SUSPEND_H
+
+#include <asm/memory.h>
+#include <asm/tlbflush.h>
+
+extern void cpu_resume(void);
+
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+ extern int __cpu_suspend(int, long, unsigned long,
+ int (*)(unsigned long));
+ int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
+ flush_tlb_all();
+ return ret;
+}
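+
+/*
+ * Usage sketch (the finisher name is illustrative): platform code passes a
+ * function that actually enters the low-power state, e.g.
+ *
+ *	static int foo_enter_sleep(unsigned long arg)
+ *	{
+ *		cpu_do_idle();
+ *		return 0;
+ *	}
+ *
+ *	cpu_suspend(0, foo_enter_sleep);
+ */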
+
+#endif
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
index 5929ef5d927a..8578d726ad78 100644
--- a/arch/arm/include/asm/tcm.h
+++ b/arch/arm/include/asm/tcm.h
@@ -27,5 +27,7 @@
void *tcm_alloc(size_t len);
void tcm_free(void *addr, size_t len);
+bool tcm_dtcm_present(void);
+bool tcm_itcm_present(void);
#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index d2005de383b8..8077145698ff 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,16 +34,12 @@
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
-#define TLB_BTB (1 << 28)
-
/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE (1 << 19)
#define TLB_V7_UIS_FULL (1 << 20)
#define TLB_V7_UIS_ASID (1 << 21)
-/* Inner Shareable BTB operation (ARMv7 MP extensions) */
-#define TLB_V7_IS_BTB (1 << 22)
-
+#define TLB_BARRIER (1 << 28)
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
@@ -58,7 +54,7 @@
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
* fr - Feroceon (v4wbi with non-outer-cacheable page table walks)
- * fa - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB))
+ * fa - Faraday (v4 with write buffer with UTLB)
* v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
* v7wbi - identical to v6wbi
*/
@@ -99,7 +95,7 @@
# define v4_always_flags (-1UL)
#endif
-#define fa_tlb_flags (TLB_WB | TLB_BTB | TLB_DCLEAN | \
+#define fa_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
TLB_V4_U_FULL | TLB_V4_U_PAGE)
#ifdef CONFIG_CPU_TLB_FA
@@ -166,7 +162,7 @@
# define v4wb_always_flags (-1UL)
#endif
-#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
TLB_V6_I_ASID | TLB_V6_D_ASID)
@@ -184,9 +180,9 @@
# define v6wbi_always_flags (-1UL)
#endif
-#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
#ifdef CONFIG_CPU_TLB_V7
@@ -341,15 +337,7 @@ static inline void local_flush_tlb_all(void)
if (tlb_flag(TLB_V7_UIS_FULL))
asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
- if (tlb_flag(TLB_BTB)) {
- /* flush the branch target cache */
- asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
- dsb();
- isb();
- }
- if (tlb_flag(TLB_V7_IS_BTB)) {
- /* flush the branch target cache */
- asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ if (tlb_flag(TLB_BARRIER)) {
dsb();
isb();
}
@@ -389,17 +377,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
#endif
- if (tlb_flag(TLB_BTB)) {
- /* flush the branch target cache */
- asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
- dsb();
- }
- if (tlb_flag(TLB_V7_IS_BTB)) {
- /* flush the branch target cache */
- asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ if (tlb_flag(TLB_BARRIER))
dsb();
- isb();
- }
}
static inline void
@@ -439,17 +418,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
#endif
- if (tlb_flag(TLB_BTB)) {
- /* flush the branch target cache */
- asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
- dsb();
- }
- if (tlb_flag(TLB_V7_IS_BTB)) {
- /* flush the branch target cache */
- asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ if (tlb_flag(TLB_BARRIER))
dsb();
- isb();
- }
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -482,15 +452,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
if (tlb_flag(TLB_V7_UIS_PAGE))
asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
- if (tlb_flag(TLB_BTB)) {
- /* flush the branch target cache */
- asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
- dsb();
- isb();
- }
- if (tlb_flag(TLB_V7_IS_BTB)) {
- /* flush the branch target cache */
- asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ if (tlb_flag(TLB_BARRIER)) {
dsb();
isb();
}
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index f90756dc16dc..5b29a6673625 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -3,6 +3,9 @@
#include <linux/list.h>
+struct pt_regs;
+struct task_struct;
+
struct undef_hook {
struct list_head node;
u32 instr_mask;
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index a5b31af5c2b8..f7887dc53c1f 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -37,7 +37,12 @@ obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o
+obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o
+ifdef CONFIG_THUMB2_KERNEL
+obj-$(CONFIG_KPROBES) += kprobes-thumb.o
+else
+obj-$(CONFIG_KPROBES) += kprobes-arm.o
+endif
obj-$(CONFIG_ATAGS_PROC) += atags.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 927522cfc12e..16baba2e4369 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -59,6 +59,9 @@ int main(void)
DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
+#ifdef CONFIG_SMP
+ DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
+#endif
#ifdef CONFIG_ARM_THUMBEE
DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd51ca9..a87cbf889ff4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -29,21 +29,53 @@
#include <asm/entry-macro-multi.S>
/*
- * Interrupt handling. Preserves r7, r8, r9
+ * Interrupt handling.
*/
.macro irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
- ldr r5, =handle_arch_irq
+ ldr r1, =handle_arch_irq
mov r0, sp
- ldr r5, [r5]
+ ldr r1, [r1]
adr lr, BSYM(9997f)
- teq r5, #0
- movne pc, r5
+ teq r1, #0
+ movne pc, r1
#endif
arch_irq_handler_default
9997:
.endm
+ .macro pabt_helper
+ @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+#ifdef MULTI_PABORT
+ ldr ip, .LCprocfns
+ mov lr, pc
+ ldr pc, [ip, #PROCESSOR_PABT_FUNC]
+#else
+ bl CPU_PABORT_HANDLER
+#endif
+ .endm
+
+ .macro dabt_helper
+
+ @
+ @ Call the processor-specific abort handler:
+ @
+ @ r2 - pt_regs
+ @ r4 - aborted context pc
+ @ r5 - aborted context psr
+ @
+ @ The abort handler must return the aborted address in r0, and
+ @ the fault status register in r1. r9 must be preserved.
+ @
+#ifdef MULTI_DABORT
+ ldr ip, .LCprocfns
+ mov lr, pc
+ ldr pc, [ip, #PROCESSOR_DABT_FUNC]
+#else
+ bl CPU_DABORT_HANDLER
+#endif
+ .endm
+
#ifdef CONFIG_KPROBES
.section .kprobes.text,"ax",%progbits
#else
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid)
SPFIX( subeq sp, sp, #4 )
stmia sp, {r1 - r12}
- ldmia r0, {r1 - r3}
- add r5, sp, #S_SP - 4 @ here for interlock avoidance
- mov r4, #-1 @ "" "" "" ""
- add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
- SPFIX( addeq r0, r0, #4 )
- str r1, [sp, #-4]! @ save the "real" r0 copied
+ ldmia r0, {r3 - r5}
+ add r7, sp, #S_SP - 4 @ here for interlock avoidance
+ mov r6, #-1 @ "" "" "" ""
+ add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX( addeq r2, r2, #4 )
+ str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
- mov r1, lr
+ mov r3, lr
@
@ We are now ready to fill in the remaining blanks on the stack:
@
- @ r0 - sp_svc
- @ r1 - lr_svc
- @ r2 - lr_<exception>, already fixed up for correct return/restart
- @ r3 - spsr_<exception>
- @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
+ @ r2 - sp_svc
+ @ r3 - lr_svc
+ @ r4 - lr_<exception>, already fixed up for correct return/restart
+ @ r5 - spsr_<exception>
+ @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
@
- stmia r5, {r0 - r4}
+ stmia r7, {r2 - r6}
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
.endm
.align 5
__dabt_svc:
svc_entry
-
- @
- @ get ready to re-enable interrupts if appropriate
- @
- mrs r9, cpsr
- tst r3, #PSR_I_BIT
- biceq r9, r9, #PSR_I_BIT
-
- @
- @ Call the processor-specific abort handler:
- @
- @ r2 - aborted context pc
- @ r3 - aborted context cpsr
- @
- @ The abort handler must return the aborted address in r0, and
- @ the fault status register in r1. r9 must be preserved.
- @
-#ifdef MULTI_DABORT
- ldr r4, .LCprocfns
- mov lr, pc
- ldr pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
- bl CPU_DABORT_HANDLER
-#endif
-
- @
- @ set desired IRQ state, then call main handler
- @
- debug_entry r1
- msr cpsr_c, r9
mov r2, sp
- bl do_DataAbort
+ dabt_helper
@
@ IRQs off again before pulling preserved data off the stack
@
disable_irq_notrace
- @
- @ restore SPSR and restart the instruction
- @
- ldr r2, [sp, #S_PSR]
- svc_exit r2 @ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+ tst r5, #PSR_I_BIT
+ bleq trace_hardirqs_on
+ tst r5, #PSR_I_BIT
+ blne trace_hardirqs_off
+#endif
+ svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
.align 5
__irq_svc:
svc_entry
+ irq_handler
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- add r7, r8, #1 @ increment it
- str r7, [tsk, #TI_PREEMPT]
-#endif
-
- irq_handler
-#ifdef CONFIG_PREEMPT
- str r8, [tsk, #TI_PREEMPT] @ restore preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
movne r0, #0 @ force flags to 0
tst r0, #_TIF_NEED_RESCHED
blne svc_preempt
#endif
- ldr r4, [sp, #S_PSR] @ irqs are already disabled
+
#ifdef CONFIG_TRACE_IRQFLAGS
- tst r4, #PSR_I_BIT
- bleq trace_hardirqs_on
+ @ The parent context IRQs must have been enabled to get here in
+ @ the first place, so there's no point checking the PSR I bit.
+ bl trace_hardirqs_on
#endif
- svc_exit r4 @ return from exception
+ svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
@@ -251,7 +251,6 @@ __und_svc:
#else
svc_entry
#endif
-
@
@ call emulation code, which returns using r9 if it has emulated
@ the instruction, or the more conventional lr if we are to treat
@@ -260,15 +259,16 @@ __und_svc:
@ r0 - instruction
@
#ifndef CONFIG_THUMB2_KERNEL
- ldr r0, [r2, #-4]
+ ldr r0, [r4, #-4]
#else
- ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2
+ ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
and r9, r0, #0xf800
cmp r9, #0xe800 @ 32-bit instruction if xx >= 0
- ldrhhs r9, [r2] @ bottom 16 bits
+ ldrhhs r9, [r4] @ bottom 16 bits
orrhs r0, r9, r0, lsl #16
#endif
adr r9, BSYM(1f)
+ mov r2, r4
bl call_fpe
mov r0, sp @ struct pt_regs *regs
@@ -282,45 +282,35 @@ __und_svc:
@
@ restore SPSR and restart the instruction
@
- ldr r2, [sp, #S_PSR] @ Get SVC cpsr
- svc_exit r2 @ return from exception
+ ldr r5, [sp, #S_PSR] @ Get SVC cpsr
+#ifdef CONFIG_TRACE_IRQFLAGS
+ tst r5, #PSR_I_BIT
+ bleq trace_hardirqs_on
+ tst r5, #PSR_I_BIT
+ blne trace_hardirqs_off
+#endif
+ svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__und_svc)
.align 5
__pabt_svc:
svc_entry
-
- @
- @ re-enable interrupts if appropriate
- @
- mrs r9, cpsr
- tst r3, #PSR_I_BIT
- biceq r9, r9, #PSR_I_BIT
-
- mov r0, r2 @ pass address of aborted instruction.
-#ifdef MULTI_PABORT
- ldr r4, .LCprocfns
- mov lr, pc
- ldr pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
- bl CPU_PABORT_HANDLER
-#endif
- debug_entry r1
- msr cpsr_c, r9 @ Maybe enable interrupts
mov r2, sp @ regs
- bl do_PrefetchAbort @ call abort handler
+ pabt_helper
@
@ IRQs off again before pulling preserved data off the stack
@
disable_irq_notrace
- @
- @ restore SPSR and restart the instruction
- @
- ldr r2, [sp, #S_PSR]
- svc_exit r2 @ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+ tst r5, #PSR_I_BIT
+ bleq trace_hardirqs_on
+ tst r5, #PSR_I_BIT
+ blne trace_hardirqs_off
+#endif
+ svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__pabt_svc)
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc)
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
- ldmia r0, {r1 - r3}
+ ldmia r0, {r3 - r5}
add r0, sp, #S_PC @ here for interlock avoidance
- mov r4, #-1 @ "" "" "" ""
+ mov r6, #-1 @ "" "" "" ""
- str r1, [sp] @ save the "real" r0 copied
+ str r3, [sp] @ save the "real" r0 copied
@ from the exception stack
@
@ We are now ready to fill in the remaining blanks on the stack:
@
- @ r2 - lr_<exception>, already fixed up for correct return/restart
- @ r3 - spsr_<exception>
- @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
+ @ r4 - lr_<exception>, already fixed up for correct return/restart
+ @ r5 - spsr_<exception>
+ @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
@
@ Also, separately save sp_usr and lr_usr
@
- stmia r0, {r2 - r4}
+ stmia r0, {r4 - r6}
ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
@@ -380,10 +370,14 @@ ENDPROC(__pabt_svc)
@ Clear FP to mark the first stack frame
@
zero_fp
+
+#ifdef CONFIG_IRQSOFF_TRACER
+ bl trace_hardirqs_off
+#endif
.endm
.macro kuser_cmpxchg_check
-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
@@ -391,8 +385,8 @@ ENDPROC(__pabt_svc)
@ if it was interrupted in a critical region. Here we
@ perform a quick test inline since it should be false
@ 99.9999% of the time. The rest is done out of line.
- cmp r2, #TASK_SIZE
- blhs kuser_cmpxchg_fixup
+ cmp r4, #TASK_SIZE
+ blhs kuser_cmpxchg64_fixup
#endif
#endif
.endm
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc)
__dabt_usr:
usr_entry
kuser_cmpxchg_check
-
- @
- @ Call the processor-specific abort handler:
- @
- @ r2 - aborted context pc
- @ r3 - aborted context cpsr
- @
- @ The abort handler must return the aborted address in r0, and
- @ the fault status register in r1.
- @
-#ifdef MULTI_DABORT
- ldr r4, .LCprocfns
- mov lr, pc
- ldr pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
- bl CPU_DABORT_HANDLER
-#endif
-
- @
- @ IRQs on, then call the main handler
- @
- debug_entry r1
- enable_irq
mov r2, sp
- adr lr, BSYM(ret_from_exception)
- b do_DataAbort
+ dabt_helper
+ b ret_from_exception
UNWIND(.fnend )
ENDPROC(__dabt_usr)
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr)
__irq_usr:
usr_entry
kuser_cmpxchg_check
-
-#ifdef CONFIG_IRQSOFF_TRACER
- bl trace_hardirqs_off
-#endif
-
- get_thread_info tsk
-#ifdef CONFIG_PREEMPT
- ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- add r7, r8, #1 @ increment it
- str r7, [tsk, #TI_PREEMPT]
-#endif
-
irq_handler
-#ifdef CONFIG_PREEMPT
- ldr r0, [tsk, #TI_PREEMPT]
- str r8, [tsk, #TI_PREEMPT]
- teq r0, r7
- ARM( strne r0, [r0, -r0] )
- THUMB( movne r0, #0 )
- THUMB( strne r0, [r0] )
-#endif
-
+ get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
UNWIND(.fnend )
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr)
__und_usr:
usr_entry
+ mov r2, r4
+ mov r3, r5
+
@
@ fall through to the emulation code, which returns using r9 if
@ it has emulated the instruction, or the more conventional lr
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown)
.align 5
__pabt_usr:
usr_entry
-
- mov r0, r2 @ pass address of aborted instruction.
-#ifdef MULTI_PABORT
- ldr r4, .LCprocfns
- mov lr, pc
- ldr pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
- bl CPU_PABORT_HANDLER
-#endif
- debug_entry r1
- enable_irq @ Enable interrupts
mov r2, sp @ regs
- bl do_PrefetchAbort @ call abort handler
+ pabt_helper
UNWIND(.fnend )
/* fall through */
/*
@@ -758,31 +701,12 @@ ENDPROC(__switch_to)
/*
* User helpers.
*
- * These are segment of kernel provided user code reachable from user space
- * at a fixed address in kernel memory. This is used to provide user space
- * with some operations which require kernel help because of unimplemented
- * native feature and/or instructions in many ARM CPUs. The idea is for
- * this code to be executed directly in user mode for best efficiency but
- * which is too intimate with the kernel counter part to be left to user
- * libraries. In fact this code might even differ from one CPU to another
- * depending on the available instruction set and restrictions like on
- * SMP systems. In other words, the kernel reserves the right to change
- * this code as needed without warning. Only the entry points and their
- * results are guaranteed to be stable.
- *
* Each segment is 32-byte aligned and will be moved to the top of the high
* vector page. New segments (if ever needed) must be added in front of
* existing ones. This mechanism should be used only for things that are
* really small and justified, and not be abused freely.
*
- * User space is expected to implement those things inline when optimizing
- * for a processor that has the necessary native support, but only if such
- * resulting binaries are already to be incompatible with earlier ARM
- * processors due to the use of unsupported instructions other than what
- * is provided here. In other words don't make binaries unable to run on
- * earlier processors just for the sake of not using these kernel helpers
- * if your compiled code is not going to use the new instructions for other
- * purpose.
+ * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
THUMB( .arm )
@@ -799,96 +723,103 @@ ENDPROC(__switch_to)
__kuser_helper_start:
/*
- * Reference prototype:
- *
- * void __kernel_memory_barrier(void)
- *
- * Input:
- *
- * lr = return address
- *
- * Output:
- *
- * none
- *
- * Clobbered:
- *
- * none
- *
- * Definition and user space usage example:
- *
- * typedef void (__kernel_dmb_t)(void);
- * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
- *
- * Apply any needed memory barrier to preserve consistency with data modified
- * manually and __kuser_cmpxchg usage.
- *
- * This could be used as follows:
- *
- * #define __kernel_dmb() \
- * asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
- * : : : "r0", "lr","cc" )
+ * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
+ * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
*/
-__kuser_memory_barrier: @ 0xffff0fa0
+__kuser_cmpxchg64: @ 0xffff0f60
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+ /*
+ * Poor you. No fast solution possible...
+ * The kernel itself must perform the operation.
+ * A special ghost syscall is used for that (see traps.c).
+ */
+ stmfd sp!, {r7, lr}
+ ldr r7, 1f @ it's 20 bits
+ swi __ARM_NR_cmpxchg64
+ ldmfd sp!, {r7, pc}
+1: .word __ARM_NR_cmpxchg64
+
+#elif defined(CONFIG_CPU_32v6K)
+
+ stmfd sp!, {r4, r5, r6, r7}
+ ldrd r4, r5, [r0] @ load old val
+ ldrd r6, r7, [r1] @ load new val
+ smp_dmb arm
+1: ldrexd r0, r1, [r2] @ load current val
+ eors r3, r0, r4 @ compare with oldval (1)
+ eoreqs r3, r1, r5 @ compare with oldval (2)
+ strexdeq r3, r6, r7, [r2] @ store newval if eq
+ teqeq r3, #1 @ success?
+ beq 1b @ if no then retry
smp_dmb arm
+ rsbs r0, r3, #0 @ set returned val and C flag
+ ldmfd sp!, {r4, r5, r6, r7}
+ bx lr
+
+#elif !defined(CONFIG_SMP)
+
+#ifdef CONFIG_MMU
+
+ /*
+ * The only thing that can break atomicity in this cmpxchg64
+ * implementation is either an IRQ or a data abort exception
+ * causing another process/thread to be scheduled in the middle of
+ * the critical sequence. The same strategy as for cmpxchg is used.
+ */
+ stmfd sp!, {r4, r5, r6, lr}
+ ldmia r0, {r4, r5} @ load old val
+ ldmia r1, {r6, lr} @ load new val
+1: ldmia r2, {r0, r1} @ load current val
+ eors r3, r0, r4 @ compare with oldval (1)
+ eoreqs r3, r1, r5 @ compare with oldval (2)
+2: stmeqia r2, {r6, lr} @ store newval if eq
+ rsbs r0, r3, #0 @ set return val and C flag
+ ldmfd sp!, {r4, r5, r6, pc}
+
+ .text
+kuser_cmpxchg64_fixup:
+ @ Called from kuser_cmpxchg_fixup.
+ @ r4 = address of interrupted insn (must be preserved).
+ @ sp = saved regs. r7 and r8 are clobbered.
+ @ 1b = first critical insn, 2b = last critical insn.
+ @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
+ mov r7, #0xffff0fff
+ sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
+ subs r8, r4, r7
+ rsbcss r8, r8, #(2b - 1b)
+ strcs r7, [sp, #S_PC]
+#if __LINUX_ARM_ARCH__ < 6
+ bcc kuser_cmpxchg32_fixup
+#endif
+ mov pc, lr
+ .previous
+
+#else
+#warning "NPTL on non MMU needs fixing"
+ mov r0, #-1
+ adds r0, r0, #0
usr_ret lr
+#endif
+
+#else
+#error "incoherent kernel configuration"
+#endif
+
+ /* pad to next slot */
+ .rept (16 - (. - __kuser_cmpxchg64)/4)
+ .word 0
+ .endr
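Not part of the patch: a minimal user-space sketch of how the new 64-bit helper is meant to be called, assuming the 0xffff0f60 entry point above and the signature documented in Documentation/arm/kernel_user_helpers.txt; the typedef and wrapper names are illustrative only.

#include <stdint.h>

typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
                                  const int64_t *newval,
                                  volatile int64_t *ptr);
#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)

/* Returns 0 if *ptr was atomically changed from expected to desired. */
static int cmpxchg64_user(volatile int64_t *ptr, int64_t expected, int64_t desired)
{
	return __kuser_cmpxchg64(&expected, &desired, ptr);
}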
.align 5
-/*
- * Reference prototype:
- *
- * int __kernel_cmpxchg(int oldval, int newval, int *ptr)
- *
- * Input:
- *
- * r0 = oldval
- * r1 = newval
- * r2 = ptr
- * lr = return address
- *
- * Output:
- *
- * r0 = returned value (zero or non-zero)
- * C flag = set if r0 == 0, clear if r0 != 0
- *
- * Clobbered:
- *
- * r3, ip, flags
- *
- * Definition and user space usage example:
- *
- * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
- * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- * Notes:
- *
- * - This routine already includes memory barriers as needed.
- *
- * For example, a user space atomic_add implementation could look like this:
- *
- * #define atomic_add(ptr, val) \
- * ({ register unsigned int *__ptr asm("r2") = (ptr); \
- * register unsigned int __result asm("r1"); \
- * asm volatile ( \
- * "1: @ atomic_add\n\t" \
- * "ldr r0, [r2]\n\t" \
- * "mov r3, #0xffff0fff\n\t" \
- * "add lr, pc, #4\n\t" \
- * "add r1, r0, %2\n\t" \
- * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
- * "bcc 1b" \
- * : "=&r" (__result) \
- * : "r" (__ptr), "rIL" (val) \
- * : "r0","r3","ip","lr","cc","memory" ); \
- * __result; })
- */
+__kuser_memory_barrier: @ 0xffff0fa0
+ smp_dmb arm
+ usr_ret lr
+
+ .align 5
__kuser_cmpxchg: @ 0xffff0fc0
@@ -925,15 +856,15 @@ __kuser_cmpxchg: @ 0xffff0fc0
usr_ret lr
.text
-kuser_cmpxchg_fixup:
+kuser_cmpxchg32_fixup:
@ Called from kuser_cmpxchg_check macro.
- @ r2 = address of interrupted insn (must be preserved).
+ @ r4 = address of interrupted insn (must be preserved).
@ sp = saved regs. r7 and r8 are clobbered.
@ 1b = first critical insn, 2b = last critical insn.
- @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+ @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
mov r7, #0xffff0fff
sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
- subs r8, r2, r7
+ subs r8, r4, r7
rsbcss r8, r8, #(2b - 1b)
strcs r7, [sp, #S_PC]
mov pc, lr
@@ -963,39 +894,6 @@ kuser_cmpxchg_fixup:
.align 5
-/*
- * Reference prototype:
- *
- * int __kernel_get_tls(void)
- *
- * Input:
- *
- * lr = return address
- *
- * Output:
- *
- * r0 = TLS value
- *
- * Clobbered:
- *
- * none
- *
- * Definition and user space usage example:
- *
- * typedef int (__kernel_get_tls_t)(void);
- * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
- *
- * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
- *
- * This could be used as follows:
- *
- * #define __kernel_get_tls() \
- * ({ register unsigned int __val asm("r0"); \
- * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
- * : "=r" (__val) : : "lr","cc" ); \
- * __val; })
- */
-
__kuser_get_tls: @ 0xffff0fe0
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
@@ -1004,19 +902,6 @@ __kuser_get_tls: @ 0xffff0fe0
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
-/*
- * Reference declaration:
- *
- * extern unsigned int __kernel_helper_version;
- *
- * Definition and user space usage example:
- *
- * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
- *
- * User space may read this to determine the curent number of helpers
- * available.
- */
-
__kuser_helper_version: @ 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 051166c2a932..9a8531eadd3d 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -121,15 +121,13 @@
.endm
#else /* CONFIG_THUMB2_KERNEL */
.macro svc_exit, rpsr
+ ldr lr, [sp, #S_SP] @ top of the stack
+ ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
clrex @ clear the exclusive monitor
- ldr r0, [sp, #S_SP] @ top of the stack
- ldr r1, [sp, #S_PC] @ return address
- tst r0, #4 @ orig stack 8-byte aligned?
- stmdb r0, {r1, \rpsr} @ rfe context
+ stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
ldmia sp, {r0 - r12}
- ldr lr, [sp, #S_LR]
- addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned
- addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned
+ mov sp, lr
+ ldr lr, [sp], #4
rfeia sp!
.endm
@@ -165,25 +163,6 @@
.endm
#endif /* !CONFIG_THUMB2_KERNEL */
- @
- @ Debug exceptions are taken as prefetch or data aborts.
- @ We must disable preemption during the handler so that
- @ we can access the debug registers safely.
- @
- .macro debug_entry, fsr
-#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
- ldr r4, =0x40f @ mask out fsr.fs
- and r5, r4, \fsr
- cmp r5, #2 @ debug exception
- bne 1f
- get_thread_info r10
- ldr r6, [r10, #TI_PREEMPT] @ get preempt count
- add r11, r6, #1 @ increment it
- str r11, [r10, #TI_PREEMPT]
-1:
-#endif
- .endm
-
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6b1e0ad9ec3b..d46f25968bec 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,8 +32,16 @@
* numbers for r1.
*
*/
+ .arm
+
__HEAD
ENTRY(stext)
+
+ THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
+ THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
+ THUMB( .thumb ) @ switch to Thumb now.
+ THUMB(1: )
+
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
#ifndef CONFIG_CPU_CP15
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 278c1b0ebb2e..742b6108a001 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,8 +71,16 @@
* crap here - that's what the boot loader (or in extreme, well justified
* circumstances, zImage) is for.
*/
+ .arm
+
__HEAD
ENTRY(stext)
+
+ THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
+ THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
+ THUMB( .thumb ) @ switch to Thumb now.
+ THUMB(1: )
+
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 87acc25d7a3e..a927ca1f5566 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -796,7 +796,7 @@ unlock:
/*
* Called from either the Data Abort Handler [watchpoint] or the
- * Prefetch Abort Handler [breakpoint] with preemption disabled.
+ * Prefetch Abort Handler [breakpoint] with interrupts disabled.
*/
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
@@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
int ret = 0;
u32 dscr;
- /* We must be called with preemption disabled. */
- WARN_ON(preemptible());
+ preempt_disable();
+
+ if (interrupts_enabled(regs))
+ local_irq_enable();
/* We only handle watchpoints and hardware breakpoints. */
ARM_DBG_READ(c1, 0, dscr);
@@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
ret = 1; /* Unhandled fault. */
}
- /*
- * Re-enable preemption after it was disabled in the
- * low-level exception handling code.
- */
preempt_enable();
return ret;
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 83bbad03fcc6..0f928a131af8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -131,54 +131,63 @@ int __init arch_probe_nr_irqs(void)
#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_data *d)
+static bool migrate_one_irq(struct irq_desc *desc)
{
- unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ const struct cpumask *affinity = d->affinity;
+ struct irq_chip *c;
bool ret = false;
- if (cpu >= nr_cpu_ids) {
- cpu = cpumask_any(cpu_online_mask);
+ /*
+ * If this is a per-CPU interrupt, or the affinity does not
+ * include this CPU, then we have nothing to do.
+ */
+ if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+ return false;
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
ret = true;
}
- pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
-
- d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+ c = irq_data_get_irq_chip(d);
+ if (c->irq_set_affinity)
+ c->irq_set_affinity(d, affinity, true);
+ else
+ pr_debug("IRQ%u: unable to set affinity\n", d->irq);
return ret;
}
/*
- * The CPU has been marked offline. Migrate IRQs off this CPU. If
- * the affinity settings do not allow other CPUs, force them onto any
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
* available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
*/
void migrate_irqs(void)
{
- unsigned int i, cpu = smp_processor_id();
+ unsigned int i;
struct irq_desc *desc;
unsigned long flags;
local_irq_save(flags);
for_each_irq_desc(i, desc) {
- struct irq_data *d = &desc->irq_data;
bool affinity_broken = false;
- raw_spin_lock(&desc->lock);
- do {
- if (desc->action == NULL)
- break;
-
- if (d->node != cpu)
- break;
+ if (!desc)
+ continue;
- affinity_broken = migrate_one_irq(d);
- } while (0);
+ raw_spin_lock(&desc->lock);
+ affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
if (affinity_broken && printk_ratelimit())
- pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
+ pr_warning("IRQ%u no longer affine to CPU%u\n", i,
+ smp_processor_id());
}
local_irq_restore(flags);
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
new file mode 100644
index 000000000000..79203ee1d039
--- /dev/null
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -0,0 +1,999 @@
+/*
+ * arch/arm/kernel/kprobes-arm.c
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * We do not have hardware single-stepping on ARM. This
+ * effort is further complicated by the ARM not having a
+ * "next PC" register. Instructions that change the PC
+ * can't be safely single-stepped in an MP environment, so
+ * we have a lot of work to do:
+ *
+ * In the prepare phase:
+ * *) If it is an instruction that does anything
+ * with the CPU mode, we reject it for a kprobe.
+ * (This is out of laziness rather than need. The
+ * instructions could be simulated.)
+ *
+ * *) Otherwise, decode the instruction rewriting its
+ * registers to take fixed, ordered registers and
+ * setting a handler for it to run the instruction.
+ *
+ * In the execution phase by an instruction's handler:
+ *
+ * *) If the PC is written to by the instruction, the
+ * instruction must be fully simulated in software.
+ *
+ * *) Otherwise, a modified form of the instruction is
+ * directly executed. Its handler calls the
+ * instruction in insn[0]. In insn[1] is a
+ * "mov pc, lr" to return.
+ *
+ * Before calling, load up the reordered registers
+ * from the original instruction's registers. If one
+ * of the original input registers is the PC, compute
+ * and adjust the appropriate input register.
+ *
+ * After call completes, copy the output registers to
+ * the original instruction's original registers.
+ *
+ * We don't use a real breakpoint instruction since that
+ * would take the kernel from SVC mode to SVC
+ * mode, losing the link register. Instead we use an
+ * undefined instruction. To simplify processing, the
+ * undefined instruction used for kprobes must be reserved
+ * exclusively for kprobes use.
+ *
+ * TODO: ifdef out some instruction decoding based on architecture.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "kprobes.h"
+
+#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
+
+#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
+
+#if __LINUX_ARM_ARCH__ >= 6
+#define BLX(reg) "blx "reg" \n\t"
+#else
+#define BLX(reg) "mov lr, pc \n\t" \
+ "mov pc, "reg" \n\t"
+#endif
+
+/*
+ * To avoid the complications of mimicking single-stepping on a
+ * processor without a Next-PC or a single-step mode, and to
+ * avoid having to deal with the side-effects of boosting, we
+ * simulate or emulate (almost) all ARM instructions.
+ *
+ * "Simulation" is where the instruction's behavior is duplicated in
+ * C code. "Emulation" is where the original instruction is rewritten
+ * and executed, often by altering its registers.
+ *
+ * By having all behavior of the kprobe'd instruction completed before
+ * returning from the kprobe_handler(), all locks (scheduler and
+ * interrupt) can safely be released. There is no need for secondary
+ * breakpoints, no race with MP or preemptible kernels, nor having to
+ * clean up resource counts at a later time impacting overall system
+ * performance. By rewriting the instruction, only the minimum registers
+ * need to be loaded and saved back optimizing performance.
+ *
+ * Calling the insnslot_*_rwflags version of a function doesn't hurt
+ * anything even when the CPSR flags aren't updated by the
+ * instruction. It's just a little slower in return for saving
+ * a little space by not having a duplicate function that doesn't
+ * update the flags. (The same optimization applies to
+ * instructions that do or don't perform register writeback.)
+ * Also, instructions can either read the flags, only write the
+ * flags, or read and write the flags. To save combinations
+ * rather than for sheer performance, flag functions just assume
+ * read and write of flags.
+ */
+
+static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ long iaddr = (long)p->addr;
+ int disp = branch_displacement(insn);
+
+ if (insn & (1 << 24))
+ regs->ARM_lr = iaddr + 4;
+
+ regs->ARM_pc = iaddr + 8 + disp;
+}
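A worked expansion of branch_displacement(), not part of the patch, showing the arithmetic simulate_bbl() relies on; the encodings and addresses are made-up examples.

#include <assert.h>
#include <stdint.h>

#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)

int main(void)
{
	int fwd  = branch_displacement(0xeb000001);	/* BL .+12: imm24 = +1 word  */
	int back = branch_displacement(0xeafffffe);	/* B  .   : imm24 = -2 words */

	assert(fwd == 4 && back == -8);

	/* simulate_bbl(): new pc = instruction address + 8 + displacement */
	assert(0x8000 + 8 + fwd == 0x800c);
	assert(0x8000 + 8 + back == 0x8000);	/* "b ." branches to itself */
	return 0;
}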
+
+static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ long iaddr = (long)p->addr;
+ int disp = branch_displacement(insn);
+
+ regs->ARM_lr = iaddr + 4;
+ regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
+ regs->ARM_cpsr |= PSR_T_BIT;
+}
+
+static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rm = insn & 0xf;
+ long rmv = regs->uregs[rm];
+
+ if (insn & (1 << 5))
+ regs->ARM_lr = (long)p->addr + 4;
+
+ regs->ARM_pc = rmv & ~0x1;
+ regs->ARM_cpsr &= ~PSR_T_BIT;
+ if (rmv & 0x1)
+ regs->ARM_cpsr |= PSR_T_BIT;
+}
+
+static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ unsigned long mask = 0xf8ff03df; /* Mask out execution state */
+ regs->uregs[rd] = regs->ARM_cpsr & mask;
+}
+
+static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->uregs[12] = regs->uregs[13];
+}
+
+static void __kprobes
+emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = (unsigned long)p->addr + 8;
+ int rt = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rtv asm("r0") = regs->uregs[rt];
+ register unsigned long rt2v asm("r1") = regs->uregs[rt+1];
+ register unsigned long rnv asm("r2") = (rn == 15) ? pc
+ : regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ BLX("%[fn]")
+ : "=r" (rtv), "=r" (rt2v), "=r" (rnv)
+ : "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv),
+ [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rt] = rtv;
+ regs->uregs[rt+1] = rt2v;
+ if (is_writeback(insn))
+ regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_ldr(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = (unsigned long)p->addr + 8;
+ int rt = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rtv asm("r0");
+ register unsigned long rnv asm("r2") = (rn == 15) ? pc
+ : regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ BLX("%[fn]")
+ : "=r" (rtv), "=r" (rnv)
+ : "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (rt == 15)
+ load_write_pc(rtv, regs);
+ else
+ regs->uregs[rt] = rtv;
+
+ if (is_writeback(insn))
+ regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_str(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long rtpc = (unsigned long)p->addr + str_pc_offset;
+ unsigned long rnpc = (unsigned long)p->addr + 8;
+ int rt = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rtv asm("r0") = (rt == 15) ? rtpc
+ : regs->uregs[rt];
+ register unsigned long rnv asm("r2") = (rn == 15) ? rnpc
+ : regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ BLX("%[fn]")
+ : "=r" (rnv)
+ : "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (is_writeback(insn))
+ regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_rd12rn16rm0rs8_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = (unsigned long)p->addr + 8;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+ int rs = (insn >> 8) & 0xf;
+
+ register unsigned long rdv asm("r0") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = (rn == 15) ? pc
+ : regs->uregs[rn];
+ register unsigned long rmv asm("r3") = (rm == 15) ? pc
+ : regs->uregs[rm];
+ register unsigned long rsv asm("r1") = regs->uregs[rs];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ BLX("%[fn]")
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdv), [cpsr] "=r" (cpsr)
+ : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
+ "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (rd == 15)
+ alu_write_pc(rdv, regs);
+ else
+ regs->uregs[rd] = rdv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd12rn16rm0_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rdv asm("r0") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ BLX("%[fn]")
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdv), [cpsr] "=r" (cpsr)
+ : "0" (rdv), "r" (rnv), "r" (rmv),
+ "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd16rn12rm0rs8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 16) & 0xf;
+ int rn = (insn >> 12) & 0xf;
+ int rm = insn & 0xf;
+ int rs = (insn >> 8) & 0xf;
+
+ register unsigned long rdv asm("r2") = regs->uregs[rd];
+ register unsigned long rnv asm("r0") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+ register unsigned long rsv asm("r1") = regs->uregs[rs];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ BLX("%[fn]")
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdv), [cpsr] "=r" (cpsr)
+ : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
+ "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd12rm0_noflags_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rdv asm("r0") = regs->uregs[rd];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ BLX("%[fn]")
+ : "=r" (rdv)
+ : "0" (rdv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rdlo = (insn >> 12) & 0xf;
+ int rdhi = (insn >> 16) & 0xf;
+ int rn = insn & 0xf;
+ int rm = (insn >> 8) & 0xf;
+
+ register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
+ register unsigned long rdhiv asm("r2") = regs->uregs[rdhi];
+ register unsigned long rnv asm("r3") = regs->uregs[rn];
+ register unsigned long rmv asm("r1") = regs->uregs[rm];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ BLX("%[fn]")
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr)
+ : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
+ "2" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rdlo] = rdlov;
+ regs->uregs[rdhi] = rdhiv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+/*
+ * For the instruction masking and comparisons in all the "space_*"
+ * functions below, Do _not_ rearrange the order of tests unless
+ * you're very, very sure of what you are doing. For the sake of
+ * efficiency, the masks for some tests sometimes assume other test
+ * have been done prior to them so the number of patterns to test
+ * for an instruction set can be as broad as possible to reduce the
+ * number of tests needed.
+ */
+
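A hedged sketch, not from the patch, of the mask/value test that the DECODE_* entries below encode; the real table walker lives in the decode core (kprobes.h/kprobes-common.c), so entry_matches() here is only an illustrative stand-in.

#include <assert.h>
#include <stdint.h>

/* An entry applies when the instruction bits selected by 'mask' equal 'value'. */
static int entry_matches(uint32_t insn, uint32_t mask, uint32_t value)
{
	return (insn & mask) == value;
}

int main(void)
{
	/* "clz r2, r3" hits the CLZ entry below (mask 0x0ff000f0, value 0x01600010) */
	assert(entry_matches(0xe16f2f13, 0x0ff000f0, 0x01600010));
	return 0;
}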
+static const union decode_item arm_1111_table[] = {
+ /* Unconditional instructions */
+
+ /* memory hint 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */
+ /* PLDI (immediate) 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */
+ /* PLDW (immediate) 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */
+ /* PLD (immediate) 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_SIMULATE (0xfe300000, 0xf4100000, kprobe_simulate_nop),
+
+ /* memory hint 1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */
+ /* PLDI (register) 1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */
+ /* PLDW (register) 1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */
+ /* PLD (register) 1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */
+ DECODE_SIMULATE (0xfe300010, 0xf6100000, kprobe_simulate_nop),
+
+ /* BLX (immediate) 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */
+ DECODE_SIMULATE (0xfe000000, 0xfa000000, simulate_blx1),
+
+ /* CPS 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
+ /* SETEND 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
+ /* SRS 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
+ /* RFE 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
+
+ /* Coprocessor instructions... */
+ /* MCRR2 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */
+ /* MRRC2 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */
+ /* LDC2 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
+ /* STC2 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
+ /* CDP2 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
+ /* MCR2 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
+ /* MRC2 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
+
+ /* Other unallocated instructions... */
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = {
+ /* Miscellaneous instructions */
+
+ /* MRS cpsr cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
+ DECODE_SIMULATEX(0x0ff000f0, 0x01000000, simulate_mrs,
+ REGS(0, NOPC, 0, 0, 0)),
+
+ /* BX cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
+ DECODE_SIMULATE (0x0ff000f0, 0x01200010, simulate_blx2bx),
+
+ /* BLX (register) cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
+ DECODE_SIMULATEX(0x0ff000f0, 0x01200030, simulate_blx2bx,
+ REGS(0, 0, 0, 0, NOPC)),
+
+ /* CLZ cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
+ DECODE_EMULATEX (0x0ff000f0, 0x01600010, emulate_rd12rm0_noflags_nopc,
+ REGS(0, NOPC, 0, 0, NOPC)),
+
+ /* QADD cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */
+ /* QSUB cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */
+ /* QDADD cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */
+ /* QDSUB cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */
+ DECODE_EMULATEX (0x0f9000f0, 0x01000050, emulate_rd12rn16rm0_rwflags_nopc,
+ REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+ /* BXJ cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
+ /* MSR cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
+ /* MRS spsr cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
+ /* BKPT 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
+ /* SMC cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
+ /* And unallocated instructions... */
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = {
+ /* Halfword multiply and multiply-accumulate */
+
+ /* SMLALxy cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
+ DECODE_EMULATEX (0x0ff00090, 0x01400080, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
+ REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+ /* SMULWy cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
+ DECODE_OR (0x0ff000b0, 0x012000a0),
+ /* SMULxy cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
+ DECODE_EMULATEX (0x0ff00090, 0x01600080, emulate_rd16rn12rm0rs8_rwflags_nopc,
+ REGS(NOPC, 0, NOPC, 0, NOPC)),
+
+ /* SMLAxy cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */
+ DECODE_OR (0x0ff00090, 0x01000080),
+ /* SMLAWy cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */
+ DECODE_EMULATEX (0x0ff000b0, 0x01200080, emulate_rd16rn12rm0rs8_rwflags_nopc,
+ REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_0000_____1001_table[] = {
+ /* Multiply and multiply-accumulate */
+
+ /* MUL cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */
+ /* MULS cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */
+ DECODE_EMULATEX (0x0fe000f0, 0x00000090, emulate_rd16rn12rm0rs8_rwflags_nopc,
+ REGS(NOPC, 0, NOPC, 0, NOPC)),
+
+ /* MLA cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */
+ /* MLAS cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */
+ DECODE_OR (0x0fe000f0, 0x00200090),
+ /* MLS cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */
+ DECODE_EMULATEX (0x0ff000f0, 0x00600090, emulate_rd16rn12rm0rs8_rwflags_nopc,
+ REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+ /* UMAAL cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */
+ DECODE_OR (0x0ff000f0, 0x00400090),
+ /* UMULL cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */
+ /* UMULLS cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */
+ /* UMLAL cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */
+ /* UMLALS cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */
+ /* SMULL cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */
+ /* SMULLS cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */
+ /* SMLAL cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */
+ /* SMLALS cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */
+ DECODE_EMULATEX (0x0f8000f0, 0x00800090, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
+ REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_0001_____1001_table[] = {
+ /* Synchronization primitives */
+
+ /* SWP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
+ DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
+ REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+ /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
+ /* And unallocated instructions... */
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_000x_____1xx1_table[] = {
+ /* Extra load/store instructions */
+
+ /* STRHT cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */
+ /* ??? cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */
+ /* LDRHT cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */
+ /* LDRSBT cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */
+ /* LDRSHT cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */
+ DECODE_REJECT (0x0f200090, 0x00200090),
+
+ /* LDRD/STRD lr,pc,{... cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */
+ DECODE_REJECT (0x0e10e0d0, 0x0000e0d0),
+
+ /* LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */
+ /* STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */
+ DECODE_EMULATEX (0x0e5000d0, 0x000000d0, emulate_ldrdstrd,
+ REGS(NOPCWB, NOPCX, 0, 0, NOPC)),
+
+ /* LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */
+ /* STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */
+ DECODE_EMULATEX (0x0e5000d0, 0x004000d0, emulate_ldrdstrd,
+ REGS(NOPCWB, NOPCX, 0, 0, 0)),
+
+ /* STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */
+ DECODE_EMULATEX (0x0e5000f0, 0x000000b0, emulate_str,
+ REGS(NOPCWB, NOPC, 0, 0, NOPC)),
+
+ /* LDRH (register) cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */
+ /* LDRSB (register) cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */
+ /* LDRSH (register) cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */
+ DECODE_EMULATEX (0x0e500090, 0x00100090, emulate_ldr,
+ REGS(NOPCWB, NOPC, 0, 0, NOPC)),
+
+ /* STRH (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */
+ DECODE_EMULATEX (0x0e5000f0, 0x004000b0, emulate_str,
+ REGS(NOPCWB, NOPC, 0, 0, 0)),
+
+ /* LDRH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */
+ /* LDRSB (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1101 xxxx */
+ /* LDRSH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */
+ DECODE_EMULATEX (0x0e500090, 0x00500090, emulate_ldr,
+ REGS(NOPCWB, NOPC, 0, 0, 0)),
+
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_000x_table[] = {
+ /* Data-processing (register) */
+
+ /* <op>S PC, ... cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */
+ DECODE_REJECT (0x0e10f000, 0x0010f000),
+
+ /* MOV IP, SP 1110 0001 1010 0000 1100 0000 0000 1101 */
+ DECODE_SIMULATE (0xffffffff, 0xe1a0c00d, simulate_mov_ipsp),
+
+ /* TST (register) cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */
+ /* TEQ (register) cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */
+ /* CMP (register) cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */
+ /* CMN (register) cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */
+ DECODE_EMULATEX (0x0f900010, 0x01100000, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(ANY, 0, 0, 0, ANY)),
+
+ /* MOV (register) cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */
+ /* MVN (register) cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */
+ DECODE_EMULATEX (0x0fa00010, 0x01a00000, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(0, ANY, 0, 0, ANY)),
+
+ /* AND (register) cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */
+ /* EOR (register) cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */
+ /* SUB (register) cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */
+ /* RSB (register) cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */
+ /* ADD (register) cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */
+ /* ADC (register) cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */
+ /* SBC (register) cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */
+ /* RSC (register) cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */
+ /* ORR (register) cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */
+ /* BIC (register) cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */
+ DECODE_EMULATEX (0x0e000010, 0x00000000, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(ANY, ANY, 0, 0, ANY)),
+
+ /* TST (reg-shift reg) cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */
+ /* TEQ (reg-shift reg) cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */
+ /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
+ /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
+ DECODE_EMULATEX (0x0f900090, 0x01100010, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(ANY, 0, NOPC, 0, ANY)),
+
+ /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
+ /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
+ DECODE_EMULATEX (0x0fa00090, 0x01a00010, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(0, ANY, NOPC, 0, ANY)),
+
+ /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
+ /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
+ /* SUB (reg-shift reg) cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */
+ /* RSB (reg-shift reg) cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */
+ /* ADD (reg-shift reg) cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */
+ /* ADC (reg-shift reg) cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */
+ /* SBC (reg-shift reg) cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */
+ /* RSC (reg-shift reg) cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */
+ /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
+ /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
+ DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(ANY, ANY, NOPC, 0, ANY)),
+
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_001x_table[] = {
+ /* Data-processing (immediate) */
+
+ /* MOVW cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
+ /* MOVT cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0x0fb00000, 0x03000000, emulate_rd12rm0_noflags_nopc,
+ REGS(0, NOPC, 0, 0, 0)),
+
+ /* YIELD cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
+ DECODE_OR (0x0fff00ff, 0x03200001),
+ /* SEV cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
+ DECODE_EMULATE (0x0fff00ff, 0x03200004, kprobe_emulate_none),
+ /* NOP cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
+ /* WFE cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
+ /* WFI cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
+ DECODE_SIMULATE (0x0fff00fc, 0x03200000, kprobe_simulate_nop),
+ /* DBG cccc 0011 0010 0000 xxxx xxxx ffff xxxx */
+ /* unallocated hints cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
+ /* MSR (immediate) cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0x0fb00000, 0x03200000),
+
+ /* <op>S PC, ... cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */
+ DECODE_REJECT (0x0e10f000, 0x0210f000),
+
+ /* TST (immediate) cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */
+ /* TEQ (immediate) cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */
+ /* CMP (immediate) cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */
+ /* CMN (immediate) cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0x0f900000, 0x03100000, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(ANY, 0, 0, 0, 0)),
+
+ /* MOV (immediate) cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */
+ /* MVN (immediate) cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0x0fa00000, 0x03a00000, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(0, ANY, 0, 0, 0)),
+
+ /* AND (immediate) cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */
+ /* EOR (immediate) cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */
+ /* SUB (immediate) cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */
+ /* RSB (immediate) cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */
+ /* ADD (immediate) cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */
+ /* ADC (immediate) cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */
+ /* SBC (immediate) cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */
+ /* RSC (immediate) cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */
+ /* ORR (immediate) cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */
+ /* BIC (immediate) cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0x0e000000, 0x02000000, emulate_rd12rn16rm0rs8_rwflags,
+ REGS(ANY, ANY, 0, 0, 0)),
+
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_0110_____xxx1_table[] = {
+ /* Media instructions */
+
+ /* SEL cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */
+ DECODE_EMULATEX (0x0ff000f0, 0x068000b0, emulate_rd12rn16rm0_rwflags_nopc,
+ REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+ /* SSAT cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */
+ /* USAT cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */
+ DECODE_OR(0x0fa00030, 0x06a00010),
+ /* SSAT16 cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */
+ /* USAT16 cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */
+ DECODE_EMULATEX (0x0fb000f0, 0x06a00030, emulate_rd12rn16rm0_rwflags_nopc,
+ REGS(0, NOPC, 0, 0, NOPC)),
+
+ /* REV cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
+ /* REV16 cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
+ /* RBIT cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
+ /* REVSH cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
+ DECODE_EMULATEX (0x0fb00070, 0x06b00030, emulate_rd12rm0_noflags_nopc,
+ REGS(0, NOPC, 0, 0, NOPC)),
+
+ /* ??? cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */
+ DECODE_REJECT (0x0fb00010, 0x06000010),
+ /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */
+ DECODE_REJECT (0x0f8000f0, 0x060000b0),
+ /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */
+ DECODE_REJECT (0x0f8000f0, 0x060000d0),
+ /* SADD16 cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */
+ /* SADDSUBX cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */
+ /* SSUBADDX cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */
+ /* SSUB16 cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */
+ /* SADD8 cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */
+ /* SSUB8 cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */
+ /* QADD16 cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */
+ /* QADDSUBX cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */
+ /* QSUBADDX cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */
+ /* QSUB16 cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */
+ /* QADD8 cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */
+ /* QSUB8 cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */
+ /* SHADD16 cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */
+ /* SHADDSUBX cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */
+ /* SHSUBADDX cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */
+ /* SHSUB16 cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */
+ /* SHADD8 cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */
+ /* SHSUB8 cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */
+ /* UADD16 cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */
+ /* UADDSUBX cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */
+ /* USUBADDX cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */
+ /* USUB16 cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */
+ /* UADD8 cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */
+ /* USUB8 cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */
+ /* UQADD16 cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */
+ /* UQADDSUBX cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */
+ /* UQSUBADDX cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */
+ /* UQSUB16 cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */
+ /* UQADD8 cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */
+ /* UQSUB8 cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */
+ /* UHADD16 cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */
+ /* UHADDSUBX cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */
+ /* UHSUBADDX cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */
+ /* UHSUB16 cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */
+ /* UHADD8 cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */
+ /* UHSUB8 cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */
+ DECODE_EMULATEX (0x0f800010, 0x06000010, emulate_rd12rn16rm0_rwflags_nopc,
+ REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+ /* PKHBT cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */
+ /* PKHTB cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */
+ DECODE_EMULATEX (0x0ff00030, 0x06800010, emulate_rd12rn16rm0_rwflags_nopc,
+ REGS(NOPC, NOPC, 0, 0, NOPC)),
+
+ /* ??? cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */
+ /* ??? cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */
+ DECODE_REJECT (0x0fb000f0, 0x06900070),
+
+ /* SXTB16 cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */
+ /* SXTB cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */
+ /* SXTH cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */
+ /* UXTB16 cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */
+ /* UXTB cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */
+ /* UXTH cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */
+ DECODE_EMULATEX (0x0f8f00f0, 0x068f0070, emulate_rd12rm0_noflags_nopc,
+ REGS(0, NOPC, 0, 0, NOPC)),
+
+ /* SXTAB16 cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */
+ /* SXTAB cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */
+ /* SXTAH cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */
+ /* UXTAB16 cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */
+ /* UXTAB cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */
+ /* UXTAH cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */
+ DECODE_EMULATEX (0x0f8000f0, 0x06800070, emulate_rd12rn16rm0_rwflags_nopc,
+ REGS(NOPCX, NOPC, 0, 0, NOPC)),
+
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_0111_____xxx1_table[] = {
+ /* Media instructions */
+
+ /* UNDEFINED cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
+ DECODE_REJECT (0x0ff000f0, 0x07f000f0),
+
+ /* SMLALD cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
+ /* SMLSLD cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
+ DECODE_EMULATEX (0x0ff00090, 0x07400010, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
+ REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+ /* SMUAD cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */
+ /* SMUSD cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */
+ DECODE_OR (0x0ff0f090, 0x0700f010),
+ /* SMMUL cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */
+ DECODE_OR (0x0ff0f0d0, 0x0750f010),
+ /* USAD8 cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */
+ DECODE_EMULATEX (0x0ff0f0f0, 0x0780f010, emulate_rd16rn12rm0rs8_rwflags_nopc,
+ REGS(NOPC, 0, NOPC, 0, NOPC)),
+
+ /* SMLAD cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */
+ /* SMLSD cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */
+ DECODE_OR (0x0ff00090, 0x07000010),
+ /* SMMLA cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */
+ DECODE_OR (0x0ff000d0, 0x07500010),
+ /* USADA8 cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */
+ DECODE_EMULATEX (0x0ff000f0, 0x07800010, emulate_rd16rn12rm0rs8_rwflags_nopc,
+ REGS(NOPC, NOPCX, NOPC, 0, NOPC)),
+
+ /* SMMLS cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */
+ DECODE_EMULATEX (0x0ff000d0, 0x075000d0, emulate_rd16rn12rm0rs8_rwflags_nopc,
+ REGS(NOPC, NOPC, NOPC, 0, NOPC)),
+
+ /* SBFX cccc 0111 101x xxxx xxxx xxxx x101 xxxx */
+ /* UBFX cccc 0111 111x xxxx xxxx xxxx x101 xxxx */
+ DECODE_EMULATEX (0x0fa00070, 0x07a00050, emulate_rd12rm0_noflags_nopc,
+ REGS(0, NOPC, 0, 0, NOPC)),
+
+ /* BFC cccc 0111 110x xxxx xxxx xxxx x001 1111 */
+ DECODE_EMULATEX (0x0fe0007f, 0x07c0001f, emulate_rd12rm0_noflags_nopc,
+ REGS(0, NOPC, 0, 0, 0)),
+
+ /* BFI cccc 0111 110x xxxx xxxx xxxx x001 xxxx */
+ DECODE_EMULATEX (0x0fe00070, 0x07c00010, emulate_rd12rm0_noflags_nopc,
+ REGS(0, NOPC, 0, 0, NOPCX)),
+
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_01xx_table[] = {
+ /* Load/store word and unsigned byte */
+
+ /* LDRB/STRB pc,[...] cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0x0c40f000, 0x0440f000),
+
+ /* STRT cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRT cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
+ /* STRBT cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRBT cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0x0d200000, 0x04200000),
+
+ /* STR (immediate) cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */
+ /* STRB (immediate) cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0x0e100000, 0x04000000, emulate_str,
+ REGS(NOPCWB, ANY, 0, 0, 0)),
+
+ /* LDR (immediate) cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRB (immediate) cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0x0e100000, 0x04100000, emulate_ldr,
+ REGS(NOPCWB, ANY, 0, 0, 0)),
+
+ /* STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */
+ /* STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0x0e100000, 0x06000000, emulate_str,
+ REGS(NOPCWB, ANY, 0, 0, NOPC)),
+
+ /* LDR (register) cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRB (register) cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0x0e100000, 0x06100000, emulate_ldr,
+ REGS(NOPCWB, ANY, 0, 0, NOPC)),
+
+ DECODE_END
+};
+
+static const union decode_item arm_cccc_100x_table[] = {
+ /* Block data transfer instructions */
+
+ /* LDM cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
+ /* STM cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_CUSTOM (0x0e400000, 0x08000000, kprobe_decode_ldmstm),
+
+ /* STM (user registers) cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
+ /* LDM (user registers) cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */
+ /* LDM (exception ret) cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
+ DECODE_END
+};
+
+const union decode_item kprobe_decode_arm_table[] = {
+ /*
+ * Unconditional instructions
+ * 1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xf0000000, 0xf0000000, arm_1111_table),
+
+ /*
+ * Miscellaneous instructions
+ * cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx
+ */
+ DECODE_TABLE (0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table),
+
+ /*
+ * Halfword multiply and multiply-accumulate
+ * cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx
+ */
+ DECODE_TABLE (0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table),
+
+ /*
+ * Multiply and multiply-accumulate
+ * cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx
+ */
+ DECODE_TABLE (0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table),
+
+ /*
+ * Synchronization primitives
+ * cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx
+ */
+ DECODE_TABLE (0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table),
+
+ /*
+ * Extra load/store instructions
+ * cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx
+ */
+ DECODE_TABLE (0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table),
+
+ /*
+ * Data-processing (register)
+ * cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx
+ * Data-processing (register-shifted register)
+ * cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx
+ */
+ DECODE_TABLE (0x0e000000, 0x00000000, arm_cccc_000x_table),
+
+ /*
+ * Data-processing (immediate)
+ * cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0x0e000000, 0x02000000, arm_cccc_001x_table),
+
+ /*
+ * Media instructions
+ * cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx
+ */
+ DECODE_TABLE (0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table),
+ DECODE_TABLE (0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table),
+
+ /*
+ * Load/store word and unsigned byte
+ * cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0x0c000000, 0x04000000, arm_cccc_01xx_table),
+
+ /*
+ * Block data transfer instructions
+ * cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0x0e000000, 0x08000000, arm_cccc_100x_table),
+
+ /* B cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
+ /* BL cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
+ DECODE_SIMULATE (0x0e000000, 0x0a000000, simulate_bbl),
+
+ /*
+ * Supervisor Call, and coprocessor instructions
+ */
+
+ /* MCRR cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */
+ /* MRRC cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */
+ /* LDC cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
+ /* STC cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
+ /* CDP cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
+ /* MCR cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
+ /* MRC cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
+ /* SVC cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0x0c000000, 0x0c000000),
+
+ DECODE_END
+};
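+
+/*
+ * Worked example: a BL instruction such as 0xeb000000 matches none of the
+ * entries above it and is finally caught by
+ *
+ *     DECODE_SIMULATE (0x0e000000, 0x0a000000, simulate_bbl)
+ *
+ * since (0xeb000000 & 0x0e000000) == 0x0a000000. kprobe_decode_insn() then
+ * sets asi->insn_handler to simulate_bbl and returns INSN_GOOD_NO_SLOT, so
+ * the branch is simulated entirely in C rather than being run from a slot.
+ */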
+
+static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->ARM_pc += 4;
+ p->ainsn.insn_handler(p, regs);
+}
+
+/* Return:
+ * INSN_REJECTED If instruction is one not allowed to kprobe,
+ * INSN_GOOD If instruction is supported and uses instruction slot,
+ * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
+ *
+ * For instructions we don't want to kprobe (INSN_REJECTED return result):
+ * These are generally instructions that modify the processor state,
+ * making them "hard" to simulate, such as those that switch processor
+ * modes or make accesses in alternate modes. Any of these could be
+ * simulated if the work were put into it, but the return would be low
+ * considering they should also be very rare.
+ */
+enum kprobe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ asi->insn_singlestep = arm_singlestep;
+ asi->insn_check_cc = kprobe_condition_checks[insn>>28];
+ return kprobe_decode_insn(insn, asi, kprobe_decode_arm_table, false);
+}
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
new file mode 100644
index 000000000000..a5394fb4e4e0
--- /dev/null
+++ b/arch/arm/kernel/kprobes-common.c
@@ -0,0 +1,577 @@
+/*
+ * arch/arm/kernel/kprobes-common.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "kprobes.h"
+
+
+#ifndef find_str_pc_offset
+
+/*
+ * For STR and STM instructions, an ARM core may choose to use either
+ * a +8 or a +12 displacement from the current instruction's address.
+ * Whichever value is chosen for a given core, it must be the same for
+ * both instructions and may not change. This function measures it.
+ */
+
+int str_pc_offset;
+
+void __init find_str_pc_offset(void)
+{
+ int addr, scratch, ret;
+
+ __asm__ (
+ "sub %[ret], pc, #4 \n\t"
+ "str pc, %[addr] \n\t"
+ "ldr %[scr], %[addr] \n\t"
+ "sub %[ret], %[scr], %[ret] \n\t"
+ : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));
+
+ str_pc_offset = ret;
+}
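+
+/*
+ * A note on the measurement above (valid for ARM-state execution, where a
+ * pc operand reads as the current instruction's address + 8): the sub
+ * leaves ret holding the address of the following str, the str stores pc
+ * using whichever displacement the core implements, and the final
+ * subtraction therefore yields 8 or 12.
+ */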
+
+#endif /* !find_str_pc_offset */
+
+
+#ifndef test_load_write_pc_interworking
+
+bool load_write_pc_interworks;
+
+void __init test_load_write_pc_interworking(void)
+{
+ int arch = cpu_architecture();
+ BUG_ON(arch == CPU_ARCH_UNKNOWN);
+ load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T;
+}
+
+#endif /* !test_load_write_pc_interworking */
+
+
+#ifndef test_alu_write_pc_interworking
+
+bool alu_write_pc_interworks;
+
+void __init test_alu_write_pc_interworking(void)
+{
+ int arch = cpu_architecture();
+ BUG_ON(arch == CPU_ARCH_UNKNOWN);
+ alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7;
+}
+
+#endif /* !test_alu_write_pc_interworking */
+
+
+void __init arm_kprobe_decode_init(void)
+{
+ find_str_pc_offset();
+ test_load_write_pc_interworking();
+ test_alu_write_pc_interworking();
+}
+
+
+static unsigned long __kprobes __check_eq(unsigned long cpsr)
+{
+ return cpsr & PSR_Z_BIT;
+}
+
+static unsigned long __kprobes __check_ne(unsigned long cpsr)
+{
+ return (~cpsr) & PSR_Z_BIT;
+}
+
+static unsigned long __kprobes __check_cs(unsigned long cpsr)
+{
+ return cpsr & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_cc(unsigned long cpsr)
+{
+ return (~cpsr) & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_mi(unsigned long cpsr)
+{
+ return cpsr & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_pl(unsigned long cpsr)
+{
+ return (~cpsr) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_vs(unsigned long cpsr)
+{
+ return cpsr & PSR_V_BIT;
+}
+
+static unsigned long __kprobes __check_vc(unsigned long cpsr)
+{
+ return (~cpsr) & PSR_V_BIT;
+}
+
+static unsigned long __kprobes __check_hi(unsigned long cpsr)
+{
+ cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return cpsr & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_ls(unsigned long cpsr)
+{
+ cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (~cpsr) & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_ge(unsigned long cpsr)
+{
+ cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (~cpsr) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_lt(unsigned long cpsr)
+{
+ cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return cpsr & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_gt(unsigned long cpsr)
+{
+ unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
+ return (~temp) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_le(unsigned long cpsr)
+{
+ unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
+ return temp & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_al(unsigned long cpsr)
+{
+ return true;
+}
+
+kprobe_check_cc * const kprobe_condition_checks[16] = {
+ &__check_eq, &__check_ne, &__check_cs, &__check_cc,
+ &__check_mi, &__check_pl, &__check_vs, &__check_vc,
+ &__check_hi, &__check_ls, &__check_ge, &__check_lt,
+ &__check_gt, &__check_le, &__check_al, &__check_al
+};
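+
+/*
+ * An illustration of the shift tricks used above (PSR flag bit positions:
+ * N=31, Z=30, C=29, V=28):
+ *
+ *     cpsr >> 1  moves Z into the C position, so ANDing with its
+ *                complement clears C whenever Z is set (HI/LS)
+ *     cpsr << 3  moves V into the N position, so the XOR leaves
+ *                N holding N ^ V (GE/LT/GT/LE)
+ *     cpsr << 1  moves Z into the N position for the GT/LE tests
+ */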
+
+
+void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs)
+{
+}
+
+void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs)
+{
+ p->ainsn.insn_fn();
+}
+
+static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rn = (insn >> 16) & 0xf;
+ int lbit = insn & (1 << 20);
+ int wbit = insn & (1 << 21);
+ int ubit = insn & (1 << 23);
+ int pbit = insn & (1 << 24);
+ long *addr = (long *)regs->uregs[rn];
+ int reg_bit_vector;
+ int reg_count;
+
+ reg_count = 0;
+ reg_bit_vector = insn & 0xffff;
+ while (reg_bit_vector) {
+ reg_bit_vector &= (reg_bit_vector - 1);
+ ++reg_count;
+ }
+
+ if (!ubit)
+ addr -= reg_count;
+ addr += (!pbit == !ubit);
+
+ reg_bit_vector = insn & 0xffff;
+ while (reg_bit_vector) {
+ int reg = __ffs(reg_bit_vector);
+ reg_bit_vector &= (reg_bit_vector - 1);
+ if (lbit)
+ regs->uregs[reg] = *addr++;
+ else
+ *addr++ = regs->uregs[reg];
+ }
+
+ if (wbit) {
+ if (!ubit)
+ addr -= reg_count;
+ addr -= (!pbit == !ubit);
+ regs->uregs[rn] = (long)addr;
+ }
+}
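+
+/*
+ * For reference, the net effect of the address adjustments above, with n
+ * registers in the list, is the usual LDM/STM addressing modes:
+ *
+ *     IA (P=0,U=1)  lowest transfer address = Rn
+ *     IB (P=1,U=1)  lowest transfer address = Rn + 4
+ *     DA (P=0,U=0)  lowest transfer address = Rn - 4*(n-1)
+ *     DB (P=1,U=0)  lowest transfer address = Rn - 4*n
+ */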
+
+static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->ARM_pc = (long)p->addr + str_pc_offset;
+ simulate_ldm1stm1(p, regs);
+ regs->ARM_pc = (long)p->addr + 4;
+}
+
+static void __kprobes simulate_ldm1_pc(struct kprobe *p, struct pt_regs *regs)
+{
+ simulate_ldm1stm1(p, regs);
+ load_write_pc(regs->ARM_pc, regs);
+}
+
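+/*
+ * Run the instruction in the probe's slot with r0-r12 loaded from the
+ * supplied pt_regs and written back to it afterwards. The asm preserves
+ * the regs pointer and r11 on the stack across the call; the CPSR flags
+ * are neither set up beforehand nor captured afterwards, hence "noflags".
+ */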
+static void __kprobes
+emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+ register void *rregs asm("r1") = regs;
+ register void *rfn asm("lr") = p->ainsn.insn_fn;
+
+ __asm__ __volatile__ (
+ "stmdb sp!, {%[regs], r11} \n\t"
+ "ldmia %[regs], {r0-r12} \n\t"
+#if __LINUX_ARM_ARCH__ >= 6
+ "blx %[fn] \n\t"
+#else
+ "str %[fn], [sp, #-4]! \n\t"
+ "adr lr, 1f \n\t"
+ "ldr pc, [sp], #4 \n\t"
+ "1: \n\t"
+#endif
+ "ldr lr, [sp], #4 \n\t" /* lr = regs */
+ "stmia lr, {r0-r12} \n\t"
+ "ldr r11, [sp], #4 \n\t"
+ : [regs] "=r" (rregs), [fn] "=r" (rfn)
+ : "0" (rregs), "1" (rfn)
+ : "r0", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r12", "memory", "cc"
+ );
+}
+
+static void __kprobes
+emulate_generic_r2_14_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+ emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+2));
+}
+
+static void __kprobes
+emulate_ldm_r3_15(struct kprobe *p, struct pt_regs *regs)
+{
+ emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+3));
+ load_write_pc(regs->ARM_pc, regs);
+}
+
+enum kprobe_insn __kprobes
+kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ kprobe_insn_handler_t *handler = 0;
+ unsigned reglist = insn & 0xffff;
+ int is_ldm = insn & 0x100000;
+ int rn = (insn >> 16) & 0xf;
+
+ if (rn <= 12 && (reglist & 0xe000) == 0) {
+ /* Instruction only uses registers in the range R0..R12 */
+ handler = emulate_generic_r0_12_noflags;
+
+ } else if (rn >= 2 && (reglist & 0x8003) == 0) {
+ /* Instruction only uses registers in the range R2..R14 */
+ rn -= 2;
+ reglist >>= 2;
+ handler = emulate_generic_r2_14_noflags;
+
+ } else if (rn >= 3 && (reglist & 0x0007) == 0) {
+ /* Instruction only uses registers in the range R3..R15 */
+ if (is_ldm && (reglist & 0x8000)) {
+ rn -= 3;
+ reglist >>= 3;
+ handler = emulate_ldm_r3_15;
+ }
+ }
+
+ if (handler) {
+ /* We can emulate the instruction in (possibly) modified form */
+ asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
+ asi->insn_handler = handler;
+ return INSN_GOOD;
+ }
+
+ /* Fall back to slower simulation... */
+ if (reglist & 0x8000)
+ handler = is_ldm ? simulate_ldm1_pc : simulate_stm1_pc;
+ else
+ handler = simulate_ldm1stm1;
+ asi->insn_handler = handler;
+ return INSN_GOOD_NO_SLOT;
+}
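+
+/*
+ * Worked example (hypothetical probed instruction): "ldmia r5, {r2, r3, r7}"
+ * uses only registers in the R2..R14 range, so the decoder above rewrites it
+ * as "ldmia r3, {r0, r1, r5}" and selects emulate_generic_r2_14_noflags,
+ * which runs the slot against regs->uregs + 2 so that slot registers r0-r12
+ * line up with the probed context's r2-r14.
+ */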
+
+
+/*
+ * Prepare an instruction slot to receive an instruction to be emulated.
+ * This is done by placing a subroutine return after the location where the
+ * instruction will be placed. We also modify ARM instructions to be
+ * unconditional as the condition code will already be checked before any
+ * emulation handler is called.
+ */
+static kprobe_opcode_t __kprobes
+prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+ bool thumb)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ if (thumb) {
+ u16 *thumb_insn = (u16 *)asi->insn;
+ thumb_insn[1] = 0x4770; /* Thumb bx lr */
+ thumb_insn[2] = 0x4770; /* Thumb bx lr */
+ return insn;
+ }
+ asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
+#else
+ asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
+#endif
+ /* Make an ARM instruction unconditional */
+ if (insn < 0xe0000000)
+ insn = (insn | 0xe0000000) & ~0x10000000;
+ return insn;
+}
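+
+/*
+ * For example, 0x12800001 (addne r0, r0, #1) is rewritten by the masking
+ * above to 0xe2800001 (add r0, r0, #1); instructions already using
+ * condition 0xe, or the 0xf unconditional space, compare >= 0xe0000000
+ * and are left unchanged.
+ */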
+
+/*
+ * Write a (probably modified) instruction into the slot previously prepared by
+ * prepare_emulated_insn
+ */
+static void __kprobes
+set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+ bool thumb)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ if (thumb) {
+ u16 *ip = (u16 *)asi->insn;
+ if (is_wide_instruction(insn))
+ *ip++ = insn >> 16;
+ *ip++ = insn;
+ return;
+ }
+#endif
+ asi->insn[0] = insn;
+}
+
+/*
+ * When we modify the register numbers encoded in an instruction to be emulated,
+ * the new values come from this define. For ARM and 32-bit Thumb instructions
+ * this gives...
+ *
+ * bit position      16   12    8    4    0
+ * ----------------+----+----+----+----+----+
+ * register          r2   r0   r1   --   r3
+ */
+#define INSN_NEW_BITS 0x00020103
+
+/* Each nibble has same value as that at INSN_NEW_BITS bit 16 */
+#define INSN_SAMEAS16_BITS 0x22222222
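+
+/*
+ * Hypothetical example: for "add r5, r6, r7" decoded with a register
+ * specification such as REGS(ANY, ANY, 0, 0, ANY), decode_regs() below
+ * replaces the Rn, Rd and Rm nibbles with the values from INSN_NEW_BITS,
+ * producing "add r0, r2, r3" for the emulation slot; the emulation handler
+ * is then responsible for shuffling the real register values in and out
+ * around the call.
+ */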
+
+/*
+ * Validate and modify each of the registers encoded in an instruction.
+ *
+ * Each nibble in regs contains a value from enum decode_reg_type. For each
+ * non-zero value, the corresponding nibble in pinsn is validated and modified
+ * according to the type.
+ */
+static bool __kprobes decode_regs(kprobe_opcode_t* pinsn, u32 regs)
+{
+ kprobe_opcode_t insn = *pinsn;
+ kprobe_opcode_t mask = 0xf; /* Start at least significant nibble */
+
+ for (; regs != 0; regs >>= 4, mask <<= 4) {
+
+ kprobe_opcode_t new_bits = INSN_NEW_BITS;
+
+ switch (regs & 0xf) {
+
+ case REG_TYPE_NONE:
+ /* Nibble not a register, skip to next */
+ continue;
+
+ case REG_TYPE_ANY:
+ /* Any register is allowed */
+ break;
+
+ case REG_TYPE_SAMEAS16:
+ /* Replace register with same as at bit position 16 */
+ new_bits = INSN_SAMEAS16_BITS;
+ break;
+
+ case REG_TYPE_SP:
+ /* Only allow SP (R13) */
+ if ((insn ^ 0xdddddddd) & mask)
+ goto reject;
+ break;
+
+ case REG_TYPE_PC:
+ /* Only allow PC (R15) */
+ if ((insn ^ 0xffffffff) & mask)
+ goto reject;
+ break;
+
+ case REG_TYPE_NOSP:
+ /* Reject SP (R13) */
+ if (((insn ^ 0xdddddddd) & mask) == 0)
+ goto reject;
+ break;
+
+ case REG_TYPE_NOSPPC:
+ case REG_TYPE_NOSPPCX:
+ /* Reject SP and PC (R13 and R15) */
+ if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0)
+ goto reject;
+ break;
+
+ case REG_TYPE_NOPCWB:
+ if (!is_writeback(insn))
+ break; /* No writeback, so any register is OK */
+ /* fall through... */
+ case REG_TYPE_NOPC:
+ case REG_TYPE_NOPCX:
+ /* Reject PC (R15) */
+ if (((insn ^ 0xffffffff) & mask) == 0)
+ goto reject;
+ break;
+ }
+
+ /* Replace value of nibble with new register number... */
+ insn &= ~mask;
+ insn |= new_bits & mask;
+ }
+
+ *pinsn = insn;
+ return true;
+
+reject:
+ return false;
+}
+
+static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
+ [DECODE_TYPE_TABLE] = sizeof(struct decode_table),
+ [DECODE_TYPE_CUSTOM] = sizeof(struct decode_custom),
+ [DECODE_TYPE_SIMULATE] = sizeof(struct decode_simulate),
+ [DECODE_TYPE_EMULATE] = sizeof(struct decode_emulate),
+ [DECODE_TYPE_OR] = sizeof(struct decode_or),
+ [DECODE_TYPE_REJECT] = sizeof(struct decode_reject)
+};
+
+/*
+ * kprobe_decode_insn operates on data tables in order to decode an ARM
+ * architecture instruction onto which a kprobe has been placed.
+ *
+ * These instruction decoding tables are a concatenation of entries, each
+ * of which consists of one of the following structs:
+ *
+ * decode_table
+ * decode_custom
+ * decode_simulate
+ * decode_emulate
+ * decode_or
+ * decode_reject
+ *
+ * Each of these starts with a struct decode_header which has the following
+ * fields:
+ *
+ * type_regs
+ * mask
+ * value
+ *
+ * The least significant DECODE_TYPE_BITS of type_regs contains a value
+ * from enum decode_type; this indicates which of the decode_* structs
+ * the entry contains. The value DECODE_TYPE_END indicates the end of the
+ * table.
+ *
+ * When the table is parsed, each entry is checked in turn to see if it
+ * matches the instruction to be decoded using the test:
+ *
+ * (insn & mask) == value
+ *
+ * If no match is found before the end of the table is reached then decoding
+ * fails with INSN_REJECTED.
+ *
+ * When a match is found, decode_regs() is called to validate and modify each
+ * of the registers encoded in the instruction; the data it uses to do this
+ * is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding
+ * to fail with INSN_REJECTED.
+ *
+ * Once the instruction has passed the above tests, further processing
+ * depends on the type of the table entry's decode struct.
+ *
+ */
+int __kprobes
+kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+ const union decode_item *table, bool thumb)
+{
+ const struct decode_header *h = (struct decode_header *)table;
+ const struct decode_header *next;
+ bool matched = false;
+
+ insn = prepare_emulated_insn(insn, asi, thumb);
+
+ for (;; h = next) {
+ enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+ u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;
+
+ if (type == DECODE_TYPE_END)
+ return INSN_REJECTED;
+
+ next = (struct decode_header *)
+ ((uintptr_t)h + decode_struct_sizes[type]);
+
+ if (!matched && (insn & h->mask.bits) != h->value.bits)
+ continue;
+
+ if (!decode_regs(&insn, regs))
+ return INSN_REJECTED;
+
+ switch (type) {
+
+ case DECODE_TYPE_TABLE: {
+ struct decode_table *d = (struct decode_table *)h;
+ next = (struct decode_header *)d->table.table;
+ break;
+ }
+
+ case DECODE_TYPE_CUSTOM: {
+ struct decode_custom *d = (struct decode_custom *)h;
+ return (*d->decoder.decoder)(insn, asi);
+ }
+
+ case DECODE_TYPE_SIMULATE: {
+ struct decode_simulate *d = (struct decode_simulate *)h;
+ asi->insn_handler = d->handler.handler;
+ return INSN_GOOD_NO_SLOT;
+ }
+
+ case DECODE_TYPE_EMULATE: {
+ struct decode_emulate *d = (struct decode_emulate *)h;
+ asi->insn_handler = d->handler.handler;
+ set_emulated_insn(insn, asi, thumb);
+ return INSN_GOOD;
+ }
+
+ case DECODE_TYPE_OR:
+ matched = true;
+ break;
+
+ case DECODE_TYPE_REJECT:
+ default:
+ return INSN_REJECTED;
+ }
+ }
+ }
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
deleted file mode 100644
index 15eeff6aea0e..000000000000
--- a/arch/arm/kernel/kprobes-decode.c
+++ /dev/null
@@ -1,1670 +0,0 @@
-/*
- * arch/arm/kernel/kprobes-decode.c
- *
- * Copyright (C) 2006, 2007 Motorola Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-/*
- * We do not have hardware single-stepping on ARM, This
- * effort is further complicated by the ARM not having a
- * "next PC" register. Instructions that change the PC
- * can't be safely single-stepped in a MP environment, so
- * we have a lot of work to do:
- *
- * In the prepare phase:
- * *) If it is an instruction that does anything
- * with the CPU mode, we reject it for a kprobe.
- * (This is out of laziness rather than need. The
- * instructions could be simulated.)
- *
- * *) Otherwise, decode the instruction rewriting its
- * registers to take fixed, ordered registers and
- * setting a handler for it to run the instruction.
- *
- * In the execution phase by an instruction's handler:
- *
- * *) If the PC is written to by the instruction, the
- * instruction must be fully simulated in software.
- *
- * *) Otherwise, a modified form of the instruction is
- * directly executed. Its handler calls the
- * instruction in insn[0]. In insn[1] is a
- * "mov pc, lr" to return.
- *
- * Before calling, load up the reordered registers
- * from the original instruction's registers. If one
- * of the original input registers is the PC, compute
- * and adjust the appropriate input register.
- *
- * After call completes, copy the output registers to
- * the original instruction's original registers.
- *
- * We don't use a real breakpoint instruction since that
- * would have us in the kernel go from SVC mode to SVC
- * mode losing the link register. Instead we use an
- * undefined instruction. To simplify processing, the
- * undefined instruction used for kprobes must be reserved
- * exclusively for kprobes use.
- *
- * TODO: ifdef out some instruction decoding based on architecture.
- */
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-
-#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
-
-#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
-
-#define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos))
-
-/*
- * Test if load/store instructions writeback the address register.
- * if P (bit 24) == 0 or W (bit 21) == 1
- */
-#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)
-
-#define PSR_fs (PSR_f|PSR_s)
-
-#define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */
-
-typedef long (insn_0arg_fn_t)(void);
-typedef long (insn_1arg_fn_t)(long);
-typedef long (insn_2arg_fn_t)(long, long);
-typedef long (insn_3arg_fn_t)(long, long, long);
-typedef long (insn_4arg_fn_t)(long, long, long, long);
-typedef long long (insn_llret_0arg_fn_t)(void);
-typedef long long (insn_llret_3arg_fn_t)(long, long, long);
-typedef long long (insn_llret_4arg_fn_t)(long, long, long, long);
-
-union reg_pair {
- long long dr;
-#ifdef __LITTLE_ENDIAN
- struct { long r0, r1; };
-#else
- struct { long r1, r0; };
-#endif
-};
-
-/*
- * For STR and STM instructions, an ARM core may choose to use either
- * a +8 or a +12 displacement from the current instruction's address.
- * Whichever value is chosen for a given core, it must be the same for
- * both instructions and may not change. This function measures it.
- */
-
-static int str_pc_offset;
-
-static void __init find_str_pc_offset(void)
-{
- int addr, scratch, ret;
-
- __asm__ (
- "sub %[ret], pc, #4 \n\t"
- "str pc, %[addr] \n\t"
- "ldr %[scr], %[addr] \n\t"
- "sub %[ret], %[scr], %[ret] \n\t"
- : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));
-
- str_pc_offset = ret;
-}
-
-/*
- * The insnslot_?arg_r[w]flags() functions below are to keep the
- * msr -> *fn -> mrs instruction sequences indivisible so that
- * the state of the CPSR flags aren't inadvertently modified
- * just before or just after the call.
- */
-
-static inline long __kprobes
-insnslot_0arg_rflags(long cpsr, insn_0arg_fn_t *fn)
-{
- register long ret asm("r0");
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[cpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- : "=r" (ret)
- : [cpsr] "r" (cpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- return ret;
-}
-
-static inline long long __kprobes
-insnslot_llret_0arg_rflags(long cpsr, insn_llret_0arg_fn_t *fn)
-{
- register long ret0 asm("r0");
- register long ret1 asm("r1");
- union reg_pair fnr;
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[cpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- : "=r" (ret0), "=r" (ret1)
- : [cpsr] "r" (cpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- fnr.r0 = ret0;
- fnr.r1 = ret1;
- return fnr.dr;
-}
-
-static inline long __kprobes
-insnslot_1arg_rflags(long r0, long cpsr, insn_1arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long ret asm("r0");
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[cpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- : "=r" (ret)
- : "0" (rr0), [cpsr] "r" (cpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- return ret;
-}
-
-static inline long __kprobes
-insnslot_2arg_rflags(long r0, long r1, long cpsr, insn_2arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long rr1 asm("r1") = r1;
- register long ret asm("r0");
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[cpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- : "=r" (ret)
- : "0" (rr0), "r" (rr1),
- [cpsr] "r" (cpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- return ret;
-}
-
-static inline long __kprobes
-insnslot_3arg_rflags(long r0, long r1, long r2, long cpsr, insn_3arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long rr1 asm("r1") = r1;
- register long rr2 asm("r2") = r2;
- register long ret asm("r0");
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[cpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- : "=r" (ret)
- : "0" (rr0), "r" (rr1), "r" (rr2),
- [cpsr] "r" (cpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- return ret;
-}
-
-static inline long long __kprobes
-insnslot_llret_3arg_rflags(long r0, long r1, long r2, long cpsr,
- insn_llret_3arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long rr1 asm("r1") = r1;
- register long rr2 asm("r2") = r2;
- register long ret0 asm("r0");
- register long ret1 asm("r1");
- union reg_pair fnr;
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[cpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- : "=r" (ret0), "=r" (ret1)
- : "0" (rr0), "r" (rr1), "r" (rr2),
- [cpsr] "r" (cpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- fnr.r0 = ret0;
- fnr.r1 = ret1;
- return fnr.dr;
-}
-
-static inline long __kprobes
-insnslot_4arg_rflags(long r0, long r1, long r2, long r3, long cpsr,
- insn_4arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long rr1 asm("r1") = r1;
- register long rr2 asm("r2") = r2;
- register long rr3 asm("r3") = r3;
- register long ret asm("r0");
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[cpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- : "=r" (ret)
- : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
- [cpsr] "r" (cpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- return ret;
-}
-
-static inline long __kprobes
-insnslot_1arg_rwflags(long r0, long *cpsr, insn_1arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long ret asm("r0");
- long oldcpsr = *cpsr;
- long newcpsr;
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[oldcpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- "mrs %[newcpsr], cpsr \n\t"
- : "=r" (ret), [newcpsr] "=r" (newcpsr)
- : "0" (rr0), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
- return ret;
-}
-
-static inline long __kprobes
-insnslot_2arg_rwflags(long r0, long r1, long *cpsr, insn_2arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long rr1 asm("r1") = r1;
- register long ret asm("r0");
- long oldcpsr = *cpsr;
- long newcpsr;
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[oldcpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- "mrs %[newcpsr], cpsr \n\t"
- : "=r" (ret), [newcpsr] "=r" (newcpsr)
- : "0" (rr0), "r" (rr1), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
- return ret;
-}
-
-static inline long __kprobes
-insnslot_3arg_rwflags(long r0, long r1, long r2, long *cpsr,
- insn_3arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long rr1 asm("r1") = r1;
- register long rr2 asm("r2") = r2;
- register long ret asm("r0");
- long oldcpsr = *cpsr;
- long newcpsr;
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[oldcpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- "mrs %[newcpsr], cpsr \n\t"
- : "=r" (ret), [newcpsr] "=r" (newcpsr)
- : "0" (rr0), "r" (rr1), "r" (rr2),
- [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
- return ret;
-}
-
-static inline long __kprobes
-insnslot_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
- insn_4arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long rr1 asm("r1") = r1;
- register long rr2 asm("r2") = r2;
- register long rr3 asm("r3") = r3;
- register long ret asm("r0");
- long oldcpsr = *cpsr;
- long newcpsr;
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[oldcpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- "mrs %[newcpsr], cpsr \n\t"
- : "=r" (ret), [newcpsr] "=r" (newcpsr)
- : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
- [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
- return ret;
-}
-
-static inline long long __kprobes
-insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
- insn_llret_4arg_fn_t *fn)
-{
- register long rr0 asm("r0") = r0;
- register long rr1 asm("r1") = r1;
- register long rr2 asm("r2") = r2;
- register long rr3 asm("r3") = r3;
- register long ret0 asm("r0");
- register long ret1 asm("r1");
- long oldcpsr = *cpsr;
- long newcpsr;
- union reg_pair fnr;
-
- __asm__ __volatile__ (
- "msr cpsr_fs, %[oldcpsr] \n\t"
- "mov lr, pc \n\t"
- "mov pc, %[fn] \n\t"
- "mrs %[newcpsr], cpsr \n\t"
- : "=r" (ret0), "=r" (ret1), [newcpsr] "=r" (newcpsr)
- : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
- [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
- : "lr", "cc"
- );
- *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
- fnr.r0 = ret0;
- fnr.r1 = ret1;
- return fnr.dr;
-}
-
-/*
- * To avoid the complications of mimicing single-stepping on a
- * processor without a Next-PC or a single-step mode, and to
- * avoid having to deal with the side-effects of boosting, we
- * simulate or emulate (almost) all ARM instructions.
- *
- * "Simulation" is where the instruction's behavior is duplicated in
- * C code. "Emulation" is where the original instruction is rewritten
- * and executed, often by altering its registers.
- *
- * By having all behavior of the kprobe'd instruction completed before
- * returning from the kprobe_handler(), all locks (scheduler and
- * interrupt) can safely be released. There is no need for secondary
- * breakpoints, no race with MP or preemptable kernels, nor having to
- * clean up resources counts at a later time impacting overall system
- * performance. By rewriting the instruction, only the minimum registers
- * need to be loaded and saved back optimizing performance.
- *
- * Calling the insnslot_*_rwflags version of a function doesn't hurt
- * anything even when the CPSR flags aren't updated by the
- * instruction. It's just a little slower in return for saving
- * a little space by not having a duplicate function that doesn't
- * update the flags. (The same optimization can be said for
- * instructions that do or don't perform register writeback)
- * Also, instructions can either read the flags, only write the
- * flags, or read and write the flags. To save combinations
- * rather than for sheer performance, flag functions just assume
- * read and write of flags.
- */
-
-static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
-{
- kprobe_opcode_t insn = p->opcode;
- long iaddr = (long)p->addr;
- int disp = branch_displacement(insn);
-
- if (insn & (1 << 24))
- regs->ARM_lr = iaddr + 4;
-
- regs->ARM_pc = iaddr + 8 + disp;
-}
-
-static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
-{
- kprobe_opcode_t insn = p->opcode;
- long iaddr = (long)p->addr;
- int disp = branch_displacement(insn);
-
- regs->ARM_lr = iaddr + 4;
- regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
- regs->ARM_cpsr |= PSR_T_BIT;
-}
-
-static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
-{
- kprobe_opcode_t insn = p->opcode;
- int rm = insn & 0xf;
- long rmv = regs->uregs[rm];
-
- if (insn & (1 << 5))
- regs->ARM_lr = (long)p->addr + 4;
-
- regs->ARM_pc = rmv & ~0x1;
- regs->ARM_cpsr &= ~PSR_T_BIT;
- if (rmv & 0x1)
- regs->ARM_cpsr |= PSR_T_BIT;
-}
-
-static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
-{
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- unsigned long mask = 0xf8ff03df; /* Mask out execution state */
- regs->uregs[rd] = regs->ARM_cpsr & mask;
-}
-
-static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
-{
- kprobe_opcode_t insn = p->opcode;
- int rn = (insn >> 16) & 0xf;
- int lbit = insn & (1 << 20);
- int wbit = insn & (1 << 21);
- int ubit = insn & (1 << 23);
- int pbit = insn & (1 << 24);
- long *addr = (long *)regs->uregs[rn];
- int reg_bit_vector;
- int reg_count;
-
- reg_count = 0;
- reg_bit_vector = insn & 0xffff;
- while (reg_bit_vector) {
- reg_bit_vector &= (reg_bit_vector - 1);
- ++reg_count;
- }
-
- if (!ubit)
- addr -= reg_count;
- addr += (!pbit == !ubit);
-
- reg_bit_vector = insn & 0xffff;
- while (reg_bit_vector) {
- int reg = __ffs(reg_bit_vector);
- reg_bit_vector &= (reg_bit_vector - 1);
- if (lbit)
- regs->uregs[reg] = *addr++;
- else
- *addr++ = regs->uregs[reg];
- }
-
- if (wbit) {
- if (!ubit)
- addr -= reg_count;
- addr -= (!pbit == !ubit);
- regs->uregs[rn] = (long)addr;
- }
-}
-
-static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
-{
- regs->ARM_pc = (long)p->addr + str_pc_offset;
- simulate_ldm1stm1(p, regs);
- regs->ARM_pc = (long)p->addr + 4;
-}
-
-static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
-{
- regs->uregs[12] = regs->uregs[13];
-}
-
-static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs)
-{
- insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- long ppc = (long)p->addr + 8;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf;
- int rm = insn & 0xf; /* rm may be invalid, don't care. */
- long rmv = (rm == 15) ? ppc : regs->uregs[rm];
- long rnv = (rn == 15) ? ppc : regs->uregs[rn];
-
- /* Not following the C calling convention here, so need asm(). */
- __asm__ __volatile__ (
- "ldr r0, %[rn] \n\t"
- "ldr r1, %[rm] \n\t"
- "msr cpsr_fs, %[cpsr]\n\t"
- "mov lr, pc \n\t"
- "mov pc, %[i_fn] \n\t"
- "str r0, %[rn] \n\t" /* in case of writeback */
- "str r2, %[rd0] \n\t"
- "str r3, %[rd1] \n\t"
- : [rn] "+m" (rnv),
- [rd0] "=m" (regs->uregs[rd]),
- [rd1] "=m" (regs->uregs[rd+1])
- : [rm] "m" (rmv),
- [cpsr] "r" (regs->ARM_cpsr),
- [i_fn] "r" (i_fn)
- : "r0", "r1", "r2", "r3", "lr", "cc"
- );
- if (is_writeback(insn))
- regs->uregs[rn] = rnv;
-}
-
-static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs)
-{
- insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- long ppc = (long)p->addr + 8;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf;
- int rm = insn & 0xf;
- long rnv = (rn == 15) ? ppc : regs->uregs[rn];
- /* rm/rmv may be invalid, don't care. */
- long rmv = (rm == 15) ? ppc : regs->uregs[rm];
- long rnv_wb;
-
- rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd],
- regs->uregs[rd+1],
- regs->ARM_cpsr, i_fn);
- if (is_writeback(insn))
- regs->uregs[rn] = rnv_wb;
-}
-
-static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
-{
- insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- long ppc = (long)p->addr + 8;
- union reg_pair fnr;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf;
- int rm = insn & 0xf;
- long rdv;
- long rnv = (rn == 15) ? ppc : regs->uregs[rn];
- long rmv = (rm == 15) ? ppc : regs->uregs[rm];
- long cpsr = regs->ARM_cpsr;
-
- fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
- if (rn != 15)
- regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
- rdv = fnr.r1;
-
- if (rd == 15) {
-#if __LINUX_ARM_ARCH__ >= 5
- cpsr &= ~PSR_T_BIT;
- if (rdv & 0x1)
- cpsr |= PSR_T_BIT;
- regs->ARM_cpsr = cpsr;
- rdv &= ~0x1;
-#else
- rdv &= ~0x2;
-#endif
- }
- regs->uregs[rd] = rdv;
-}
-
-static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
-{
- insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- long iaddr = (long)p->addr;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf;
- int rm = insn & 0xf;
- long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
- long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
- long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
- long rnv_wb;
-
- rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
- if (rn != 15)
- regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */
-}
-
-static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs)
-{
- insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- int rm = insn & 0xf;
- long rmv = regs->uregs[rm];
-
- /* Writes Q flag */
- regs->uregs[rd] = insnslot_1arg_rwflags(rmv, &regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes emulate_sel(struct kprobe *p, struct pt_regs *regs)
-{
- insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf;
- int rm = insn & 0xf;
- long rnv = regs->uregs[rn];
- long rmv = regs->uregs[rm];
-
- /* Reads GE bits */
- regs->uregs[rd] = insnslot_2arg_rflags(rnv, rmv, regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs)
-{
- insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0];
-
- insnslot_0arg_rflags(regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs)
-{
-}
-
-static void __kprobes
-emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs)
-{
- insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- long rdv = regs->uregs[rd];
-
- regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs)
-{
- insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- int rn = insn & 0xf;
- long rdv = regs->uregs[rd];
- long rnv = regs->uregs[rn];
-
- regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs)
-{
- insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- int rm = insn & 0xf;
- long rmv = regs->uregs[rm];
-
- regs->uregs[rd] = insnslot_1arg_rflags(rmv, regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_rd12rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
-{
- insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf;
- int rm = insn & 0xf;
- long rnv = regs->uregs[rn];
- long rmv = regs->uregs[rm];
-
- regs->uregs[rd] =
- insnslot_2arg_rwflags(rnv, rmv, &regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_rd16rn12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
-{
- insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 16) & 0xf;
- int rn = (insn >> 12) & 0xf;
- int rs = (insn >> 8) & 0xf;
- int rm = insn & 0xf;
- long rnv = regs->uregs[rn];
- long rsv = regs->uregs[rs];
- long rmv = regs->uregs[rm];
-
- regs->uregs[rd] =
- insnslot_3arg_rwflags(rnv, rsv, rmv, &regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_rd16rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
-{
- insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 16) & 0xf;
- int rs = (insn >> 8) & 0xf;
- int rm = insn & 0xf;
- long rsv = regs->uregs[rs];
- long rmv = regs->uregs[rm];
-
- regs->uregs[rd] =
- insnslot_2arg_rwflags(rsv, rmv, &regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_rdhi16rdlo12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
-{
- insn_llret_4arg_fn_t *i_fn = (insn_llret_4arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- union reg_pair fnr;
- int rdhi = (insn >> 16) & 0xf;
- int rdlo = (insn >> 12) & 0xf;
- int rs = (insn >> 8) & 0xf;
- int rm = insn & 0xf;
- long rsv = regs->uregs[rs];
- long rmv = regs->uregs[rm];
-
- fnr.dr = insnslot_llret_4arg_rwflags(regs->uregs[rdhi],
- regs->uregs[rdlo], rsv, rmv,
- &regs->ARM_cpsr, i_fn);
- regs->uregs[rdhi] = fnr.r0;
- regs->uregs[rdlo] = fnr.r1;
-}
-
-static void __kprobes
-emulate_alu_imm_rflags(struct kprobe *p, struct pt_regs *regs)
-{
- insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf;
- long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
-
- regs->uregs[rd] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs)
-{
- insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf;
- long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
-
- regs->uregs[rd] = insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs)
-{
- insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- int rn = (insn >> 16) & 0xf;
- long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
-
- insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs)
-{
- insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- long ppc = (long)p->addr + 8;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */
- int rs = (insn >> 8) & 0xf; /* invalid, don't care. */
- int rm = insn & 0xf;
- long rnv = (rn == 15) ? ppc : regs->uregs[rn];
- long rmv = (rm == 15) ? ppc : regs->uregs[rm];
- long rsv = regs->uregs[rs];
-
- regs->uregs[rd] =
- insnslot_3arg_rflags(rnv, rmv, rsv, regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs)
-{
- insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- long ppc = (long)p->addr + 8;
- int rd = (insn >> 12) & 0xf;
- int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */
- int rs = (insn >> 8) & 0xf; /* invalid, don't care. */
- int rm = insn & 0xf;
- long rnv = (rn == 15) ? ppc : regs->uregs[rn];
- long rmv = (rm == 15) ? ppc : regs->uregs[rm];
- long rsv = regs->uregs[rs];
-
- regs->uregs[rd] =
- insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
-}
-
-static void __kprobes
-emulate_alu_tests(struct kprobe *p, struct pt_regs *regs)
-{
- insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
- kprobe_opcode_t insn = p->opcode;
- long ppc = (long)p->addr + 8;
- int rn = (insn >> 16) & 0xf;
- int rs = (insn >> 8) & 0xf; /* rs/rsv may be invalid, don't care. */
- int rm = insn & 0xf;
- long rnv = (rn == 15) ? ppc : regs->uregs[rn];
- long rmv = (rm == 15) ? ppc : regs->uregs[rm];
- long rsv = regs->uregs[rs];
-
- insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
-}
-
-static enum kprobe_insn __kprobes
-prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- int not_imm = (insn & (1 << 26)) ? (insn & (1 << 25))
- : (~insn & (1 << 22));
-
- if (is_writeback(insn) && is_r15(insn, 16))
- return INSN_REJECTED; /* Writeback to PC */
-
- insn &= 0xfff00fff;
- insn |= 0x00001000; /* Rn = r0, Rd = r1 */
- if (not_imm) {
- insn &= ~0xf;
- insn |= 2; /* Rm = r2 */
- }
- asi->insn[0] = insn;
- asi->insn_handler = (insn & (1 << 20)) ? emulate_ldr : emulate_str;
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- if (is_r15(insn, 12))
- return INSN_REJECTED; /* Rd is PC */
-
- insn &= 0xffff0fff; /* Rd = r0 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_rd12_modify;
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-prep_emulate_rd12rn0_modify(kprobe_opcode_t insn,
- struct arch_specific_insn *asi)
-{
- if (is_r15(insn, 12))
- return INSN_REJECTED; /* Rd is PC */
-
- insn &= 0xffff0ff0; /* Rd = r0 */
- insn |= 0x00000001; /* Rn = r1 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_rd12rn0_modify;
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- if (is_r15(insn, 12))
- return INSN_REJECTED; /* Rd is PC */
-
- insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_rd12rm0;
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn,
- struct arch_specific_insn *asi)
-{
- if (is_r15(insn, 12))
- return INSN_REJECTED; /* Rd is PC */
-
- insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */
- insn |= 0x00000001; /* Rm = r1 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_rd12rn16rm0_rwflags;
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn,
- struct arch_specific_insn *asi)
-{
- if (is_r15(insn, 16))
- return INSN_REJECTED; /* Rd is PC */
-
- insn &= 0xfff0f0f0; /* Rd = r0, Rs = r0 */
- insn |= 0x00000001; /* Rm = r1 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_rd16rs8rm0_rwflags;
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn,
- struct arch_specific_insn *asi)
-{
- if (is_r15(insn, 16))
- return INSN_REJECTED; /* Rd is PC */
-
- insn &= 0xfff000f0; /* Rd = r0, Rn = r0 */
- insn |= 0x00000102; /* Rs = r1, Rm = r2 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_rd16rn12rs8rm0_rwflags;
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn,
- struct arch_specific_insn *asi)
-{
- if (is_r15(insn, 16) || is_r15(insn, 12))
- return INSN_REJECTED; /* RdHi or RdLo is PC */
-
- insn &= 0xfff000f0; /* RdHi = r0, RdLo = r1 */
- insn |= 0x00001203; /* Rs = r2, Rm = r3 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_rdhi16rdlo12rs8rm0_rwflags;
- return INSN_GOOD;
-}
-
-/*
- * For the instruction masking and comparisons in all the "space_*"
- * functions below, Do _not_ rearrange the order of tests unless
- * you're very, very sure of what you are doing. For the sake of
- * efficiency, the masks for some tests sometimes assume other test
- * have been done prior to them so the number of patterns to test
- * for an instruction set can be as broad as possible to reduce the
- * number of tests needed.
- */
-
-static enum kprobe_insn __kprobes
-space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */
- /* PLDI : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */
- /* PLDW : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */
- /* PLD : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */
- if ((insn & 0xfe300000) == 0xf4100000) {
- asi->insn_handler = emulate_nop;
- return INSN_GOOD_NO_SLOT;
- }
-
- /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */
- if ((insn & 0xfe000000) == 0xfa000000) {
- asi->insn_handler = simulate_blx1;
- return INSN_GOOD_NO_SLOT;
- }
-
- /* CPS : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
- /* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
-
- /* SRS : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
- /* RFE : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
-
- /* Coprocessor instructions... */
- /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
- /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
- /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
- /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
- /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
- /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
- /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
-
- return INSN_REJECTED;
-}
-
-static enum kprobe_insn __kprobes
-space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */
- if ((insn & 0x0f900010) == 0x01000000) {
-
- /* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
- if ((insn & 0x0ff000f0) == 0x01000000) {
- if (is_r15(insn, 12))
- return INSN_REJECTED; /* Rd is PC */
- asi->insn_handler = simulate_mrs;
- return INSN_GOOD_NO_SLOT;
- }
-
- /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
- if ((insn & 0x0ff00090) == 0x01400080)
- return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn,
- asi);
-
- /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
- /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
- if ((insn & 0x0ff000b0) == 0x012000a0 ||
- (insn & 0x0ff00090) == 0x01600080)
- return prep_emulate_rd16rs8rm0_wflags(insn, asi);
-
- /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */
- /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */
- if ((insn & 0x0ff00090) == 0x01000080 ||
- (insn & 0x0ff000b0) == 0x01200080)
- return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
-
- /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
- /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
- /* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
-
- /* Other instruction encodings aren't yet defined */
- return INSN_REJECTED;
- }
-
- /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */
- else if ((insn & 0x0f900090) == 0x01000010) {
-
- /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
- /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
- if ((insn & 0x0ff000d0) == 0x01200010) {
- if ((insn & 0x0ff000ff) == 0x0120003f)
- return INSN_REJECTED; /* BLX pc */
- asi->insn_handler = simulate_blx2bx;
- return INSN_GOOD_NO_SLOT;
- }
-
- /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
- if ((insn & 0x0ff000f0) == 0x01600010)
- return prep_emulate_rd12rm0(insn, asi);
-
- /* QADD : cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx :Q */
- /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */
- /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */
- /* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */
- if ((insn & 0x0f9000f0) == 0x01000050)
- return prep_emulate_rd12rn16rm0_wflags(insn, asi);
-
- /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
- /* SMC : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
-
- /* Other instruction encodings aren't yet defined */
- return INSN_REJECTED;
- }
-
- /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */
- else if ((insn & 0x0f0000f0) == 0x00000090) {
-
- /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */
- /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */
- /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */
- /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */
- /* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */
- /* undef : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx : */
- /* MLS : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx : */
- /* undef : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx : */
- /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */
- /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */
- /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */
- /* UMLALS : cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx :cc */
- /* SMULL : cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx : */
- /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */
- /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */
- /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */
- if ((insn & 0x00d00000) == 0x00500000)
- return INSN_REJECTED;
- else if ((insn & 0x00e00000) == 0x00000000)
- return prep_emulate_rd16rs8rm0_wflags(insn, asi);
- else if ((insn & 0x00a00000) == 0x00200000)
- return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
- else
- return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn,
- asi);
- }
-
- /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */
- else if ((insn & 0x0e000090) == 0x00000090) {
-
- /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */
- /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */
- /* ??? : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */
- /* ??? : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */
- /* ??? : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */
- /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */
- /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */
- /* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */
- /* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */
- /* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */
- /* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */
- /* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */
- /* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */
-
- /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */
- /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */
- /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */
- /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */
- /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */
- /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */
- if ((insn & 0x0f0000f0) == 0x01000090) {
- if ((insn & 0x0fb000f0) == 0x01000090) {
- /* SWP/SWPB */
- return prep_emulate_rd12rn16rm0_wflags(insn,
- asi);
- } else {
- /* STREX/LDREX variants and unallocaed space */
- return INSN_REJECTED;
- }
-
- } else if ((insn & 0x0e1000d0) == 0x00000d0) {
- /* STRD/LDRD */
- if ((insn & 0x0000e000) == 0x0000e000)
- return INSN_REJECTED; /* Rd is LR or PC */
- if (is_writeback(insn) && is_r15(insn, 16))
- return INSN_REJECTED; /* Writeback to PC */
-
- insn &= 0xfff00fff;
- insn |= 0x00002000; /* Rn = r0, Rd = r2 */
- if (!(insn & (1 << 22))) {
- /* Register index */
- insn &= ~0xf;
- insn |= 1; /* Rm = r1 */
- }
- asi->insn[0] = insn;
- asi->insn_handler =
- (insn & (1 << 5)) ? emulate_strd : emulate_ldrd;
- return INSN_GOOD;
- }
-
- /* LDRH/STRH/LDRSB/LDRSH */
- if (is_r15(insn, 12))
- return INSN_REJECTED; /* Rd is PC */
- return prep_emulate_ldr_str(insn, asi);
- }
-
- /* cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
-
- /*
- * ALU op with S bit and Rd == 15 :
- * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx
- */
- if ((insn & 0x0e10f000) == 0x0010f000)
- return INSN_REJECTED;
-
- /*
- * "mov ip, sp" is the most common kprobe'd instruction by far.
- * Check and optimize for it explicitly.
- */
- if (insn == 0xe1a0c00d) {
- asi->insn_handler = simulate_mov_ipsp;
- return INSN_GOOD_NO_SLOT;
- }
-
- /*
- * Data processing: Immediate-shift / Register-shift
- * ALU op : cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx
- * CPY : cccc 0001 1010 xxxx xxxx 0000 0000 xxxx
- * MOV : cccc 0001 101x xxxx xxxx xxxx xxxx xxxx
- * *S (bit 20) updates condition codes
- * ADC/SBC/RSC reads the C flag
- */
- insn &= 0xfff00ff0; /* Rn = r0, Rd = r0 */
- insn |= 0x00000001; /* Rm = r1 */
- if (insn & 0x010) {
- insn &= 0xfffff0ff; /* register shift */
- insn |= 0x00000200; /* Rs = r2 */
- }
- asi->insn[0] = insn;
-
- if ((insn & 0x0f900000) == 0x01100000) {
- /*
- * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx
- * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx
- * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx
- * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx
- */
- asi->insn_handler = emulate_alu_tests;
- } else {
- /* ALU ops which write to Rd */
- asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
- emulate_alu_rwflags : emulate_alu_rflags;
- }
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* MOVW : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
- /* MOVT : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
- if ((insn & 0x0fb00000) == 0x03000000)
- return prep_emulate_rd12_modify(insn, asi);
-
- /* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
- if ((insn & 0x0fff0000) == 0x03200000) {
- unsigned op2 = insn & 0x000000ff;
- if (op2 == 0x01 || op2 == 0x04) {
- /* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
- /* SEV : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_none;
- return INSN_GOOD;
- } else if (op2 <= 0x03) {
- /* NOP : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
- /* WFE : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
- /* WFI : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
- /*
- * We make WFE and WFI true NOPs to avoid stalls due
- * to missing events whilst processing the probe.
- */
- asi->insn_handler = emulate_nop;
- return INSN_GOOD_NO_SLOT;
- }
- /* For DBG and unallocated hints it's safest to reject them */
- return INSN_REJECTED;
- }
-
- /*
- * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
- * ALU op with S bit and Rd == 15 :
- * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
- */
- if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */
- (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */
- return INSN_REJECTED;
-
- /*
- * Data processing: 32-bit Immediate
- * ALU op : cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
- * MOV : cccc 0011 101x xxxx xxxx xxxx xxxx xxxx
- * *S (bit 20) updates condition codes
- * ADC/SBC/RSC reads the C flag
- */
- insn &= 0xfff00fff; /* Rn = r0 and Rd = r0 */
- asi->insn[0] = insn;
-
- if ((insn & 0x0f900000) == 0x03100000) {
- /*
- * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx
- * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx
- * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx
- * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx
- */
- asi->insn_handler = emulate_alu_tests_imm;
- } else {
- /* ALU ops which write to Rd */
- asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
- emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
- }
- return INSN_GOOD;
-}
-
-static enum kprobe_insn __kprobes
-space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! */
- if ((insn & 0x0ff000f0) == 0x068000b0) {
- if (is_r15(insn, 12))
- return INSN_REJECTED; /* Rd is PC */
- insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */
- insn |= 0x00000001; /* Rm = r1 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_sel;
- return INSN_GOOD;
- }
-
- /* SSAT : cccc 0110 101x xxxx xxxx xxxx xx01 xxxx :Q */
- /* USAT : cccc 0110 111x xxxx xxxx xxxx xx01 xxxx :Q */
- /* SSAT16 : cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx :Q */
- /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */
- if ((insn & 0x0fa00030) == 0x06a00010 ||
- (insn & 0x0fb000f0) == 0x06a00030) {
- if (is_r15(insn, 12))
- return INSN_REJECTED; /* Rd is PC */
- insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */
- asi->insn[0] = insn;
- asi->insn_handler = emulate_sat;
- return INSN_GOOD;
- }
-
- /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
- /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
- /* RBIT : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
- /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
- if ((insn & 0x0ff00070) == 0x06b00030 ||
- (insn & 0x0ff00070) == 0x06f00030)
- return prep_emulate_rd12rm0(insn, asi);
-
- /* ??? : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx : */
- /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */
- /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */
- /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */
- /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */
- /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */
- /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx : */
- /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx : */
- /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */
- /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */
- /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */
- /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */
- /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */
- /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */
- /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx : */
- /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx : */
- /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */
- /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */
- /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */
- /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */
- /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */
- /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */
- /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx : */
- /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx : */
- /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */
- /* ??? : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx : */
- /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */
- /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */
- /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */
- /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */
- /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */
- /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx : */
- /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx : */
- /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */
- /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */
- /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */
- /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */
- /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */
- /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */
- /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx : */
- /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx : */
- /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */
- /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */
- /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */
- /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */
- /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */
- /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */
- /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx : */
- /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx : */
- /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */
- if ((insn & 0x0f800010) == 0x06000010) {
- if ((insn & 0x00300000) == 0x00000000 ||
- (insn & 0x000000e0) == 0x000000a0 ||
- (insn & 0x000000e0) == 0x000000c0)
- return INSN_REJECTED; /* Unallocated space */
- return prep_emulate_rd12rn16rm0_wflags(insn, asi);
- }
-
- /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */
- /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */
- if ((insn & 0x0ff00030) == 0x06800010)
- return prep_emulate_rd12rn16rm0_wflags(insn, asi);
-
- /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */
- /* SXTB16 : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx : */
- /* ??? : cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx : */
- /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */
- /* SXTB : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx : */
- /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */
- /* SXTH : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx : */
- /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */
- /* UXTB16 : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx : */
- /* ??? : cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx : */
- /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */
- /* UXTB : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx : */
- /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */
- /* UXTH : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx : */
- if ((insn & 0x0f8000f0) == 0x06800070) {
- if ((insn & 0x00300000) == 0x00100000)
- return INSN_REJECTED; /* Unallocated space */
-
- if ((insn & 0x000f0000) == 0x000f0000)
- return prep_emulate_rd12rm0(insn, asi);
- else
- return prep_emulate_rd12rn16rm0_wflags(insn, asi);
- }
-
- /* Other instruction encodings aren't yet defined */
- return INSN_REJECTED;
-}
-
-static enum kprobe_insn __kprobes
-space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* Undef : cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
- if ((insn & 0x0ff000f0) == 0x03f000f0)
- return INSN_REJECTED;
-
- /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
- /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
- if ((insn & 0x0ff00090) == 0x07400010)
- return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
-
- /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */
- /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */
- /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */
- /* SMUSD : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx : */
- /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */
- /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */
- /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx : */
- /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx : */
- if ((insn & 0x0ff00090) == 0x07000010 ||
- (insn & 0x0ff000d0) == 0x07500010 ||
- (insn & 0x0ff000f0) == 0x07800010) {
-
- if ((insn & 0x0000f000) == 0x0000f000)
- return prep_emulate_rd16rs8rm0_wflags(insn, asi);
- else
- return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
- }
-
- /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */
- if ((insn & 0x0ff000d0) == 0x075000d0)
- return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
-
- /* SBFX : cccc 0111 101x xxxx xxxx xxxx x101 xxxx : */
- /* UBFX : cccc 0111 111x xxxx xxxx xxxx x101 xxxx : */
- if ((insn & 0x0fa00070) == 0x07a00050)
- return prep_emulate_rd12rm0(insn, asi);
-
- /* BFI : cccc 0111 110x xxxx xxxx xxxx x001 xxxx : */
- /* BFC : cccc 0111 110x xxxx xxxx xxxx x001 1111 : */
- if ((insn & 0x0fe00070) == 0x07c00010) {
-
- if ((insn & 0x0000000f) == 0x0000000f)
- return prep_emulate_rd12_modify(insn, asi);
- else
- return prep_emulate_rd12rn0_modify(insn, asi);
- }
-
- return INSN_REJECTED;
-}
-
-static enum kprobe_insn __kprobes
-space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* LDR : cccc 01xx x0x1 xxxx xxxx xxxx xxxx xxxx */
- /* LDRB : cccc 01xx x1x1 xxxx xxxx xxxx xxxx xxxx */
- /* LDRBT : cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
- /* LDRT : cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
- /* STR : cccc 01xx x0x0 xxxx xxxx xxxx xxxx xxxx */
- /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */
- /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
- /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
-
- if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12))
- return INSN_REJECTED; /* LDRB into PC */
-
- return prep_emulate_ldr_str(insn, asi);
-}
-
-static enum kprobe_insn __kprobes
-space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* LDM(2) : cccc 100x x101 xxxx 0xxx xxxx xxxx xxxx */
- /* LDM(3) : cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
- if ((insn & 0x0e708000) == 0x85000000 ||
- (insn & 0x0e508000) == 0x85010000)
- return INSN_REJECTED;
-
- /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
- /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
- asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */
- simulate_stm1_pc : simulate_ldm1stm1;
- return INSN_GOOD_NO_SLOT;
-}
-
-static enum kprobe_insn __kprobes
-space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
- /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
- asi->insn_handler = simulate_bbl;
- return INSN_GOOD_NO_SLOT;
-}
-
-static enum kprobe_insn __kprobes
-space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- /* Coprocessor instructions... */
- /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
- /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
- /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
- /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
- /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
- /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
- /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
-
- /* SVC : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
-
- return INSN_REJECTED;
-}
-
-static unsigned long __kprobes __check_eq(unsigned long cpsr)
-{
- return cpsr & PSR_Z_BIT;
-}
-
-static unsigned long __kprobes __check_ne(unsigned long cpsr)
-{
- return (~cpsr) & PSR_Z_BIT;
-}
-
-static unsigned long __kprobes __check_cs(unsigned long cpsr)
-{
- return cpsr & PSR_C_BIT;
-}
-
-static unsigned long __kprobes __check_cc(unsigned long cpsr)
-{
- return (~cpsr) & PSR_C_BIT;
-}
-
-static unsigned long __kprobes __check_mi(unsigned long cpsr)
-{
- return cpsr & PSR_N_BIT;
-}
-
-static unsigned long __kprobes __check_pl(unsigned long cpsr)
-{
- return (~cpsr) & PSR_N_BIT;
-}
-
-static unsigned long __kprobes __check_vs(unsigned long cpsr)
-{
- return cpsr & PSR_V_BIT;
-}
-
-static unsigned long __kprobes __check_vc(unsigned long cpsr)
-{
- return (~cpsr) & PSR_V_BIT;
-}
-
-static unsigned long __kprobes __check_hi(unsigned long cpsr)
-{
- cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return cpsr & PSR_C_BIT;
-}
-
-static unsigned long __kprobes __check_ls(unsigned long cpsr)
-{
- cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return (~cpsr) & PSR_C_BIT;
-}
-
-static unsigned long __kprobes __check_ge(unsigned long cpsr)
-{
- cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return (~cpsr) & PSR_N_BIT;
-}
-
-static unsigned long __kprobes __check_lt(unsigned long cpsr)
-{
- cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return cpsr & PSR_N_BIT;
-}
-
-static unsigned long __kprobes __check_gt(unsigned long cpsr)
-{
- unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
- return (~temp) & PSR_N_BIT;
-}
-
-static unsigned long __kprobes __check_le(unsigned long cpsr)
-{
- unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
- return temp & PSR_N_BIT;
-}
-
-static unsigned long __kprobes __check_al(unsigned long cpsr)
-{
- return true;
-}
-
-static kprobe_check_cc * const condition_checks[16] = {
- &__check_eq, &__check_ne, &__check_cs, &__check_cc,
- &__check_mi, &__check_pl, &__check_vs, &__check_vc,
- &__check_hi, &__check_ls, &__check_ge, &__check_lt,
- &__check_gt, &__check_le, &__check_al, &__check_al
-};
-
-/* Return:
- * INSN_REJECTED If instruction is one not allowed to kprobe,
- * INSN_GOOD If instruction is supported and uses instruction slot,
- * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
- *
- * For instructions we don't want to kprobe (INSN_REJECTED return result):
- * These are generally ones that modify the processor state making
- * them "hard" to simulate such as switches processor modes or
- * make accesses in alternate modes. Any of these could be simulated
- * if the work was put into it, but low return considering they
- * should also be very rare.
- */
-enum kprobe_insn __kprobes
-arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
-{
- asi->insn_check_cc = condition_checks[insn>>28];
- asi->insn[1] = KPROBE_RETURN_INSTRUCTION;
-
- if ((insn & 0xf0000000) == 0xf0000000)
-
- return space_1111(insn, asi);
-
- else if ((insn & 0x0e000000) == 0x00000000)
-
- return space_cccc_000x(insn, asi);
-
- else if ((insn & 0x0e000000) == 0x02000000)
-
- return space_cccc_001x(insn, asi);
-
- else if ((insn & 0x0f000010) == 0x06000010)
-
- return space_cccc_0110__1(insn, asi);
-
- else if ((insn & 0x0f000010) == 0x07000010)
-
- return space_cccc_0111__1(insn, asi);
-
- else if ((insn & 0x0c000000) == 0x04000000)
-
- return space_cccc_01xx(insn, asi);
-
- else if ((insn & 0x0e000000) == 0x08000000)
-
- return space_cccc_100x(insn, asi);
-
- else if ((insn & 0x0e000000) == 0x0a000000)
-
- return space_cccc_101x(insn, asi);
-
- return space_cccc_11xx(insn, asi);
-}
-
-void __init arm_kprobe_decode_init(void)
-{
- find_str_pc_offset();
-}
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
new file mode 100644
index 000000000000..902ca59e8b11
--- /dev/null
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -0,0 +1,1462 @@
+/*
+ * arch/arm/kernel/kprobes-thumb.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include "kprobes.h"
+
+
+/*
+ * True if current instruction is in an IT block.
+ */
+#define in_it_block(cpsr) ((cpsr & 0x06000c00) != 0x00000000)
+
+/*
+ * Return the condition code to check for the currently executing instruction.
+ * This is in ITSTATE<7:4> which is in CPSR<15:12> but is only valid if
+ * in_it_block returns true.
+ */
+#define current_cond(cpsr) ((cpsr >> 12) & 0xf)
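+
+/*
+ * Illustrative example (hypothetical values): after an "IT NE" instruction
+ * ITSTATE is 0x18, so CPSR<15:10> holds 000110 and CPSR<26:25> holds 00;
+ * in_it_block() then sees a non-zero ITSTATE<3:0> and current_cond()
+ * extracts 0b0001, i.e. the NE condition.
+ */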
+
+/*
+ * Return the PC value for a probe in thumb code.
+ * This is the address of the probed instruction plus 4.
+ * We subtract one because the address will have bit zero set to indicate
+ * a pointer to thumb code.
+ */
+static inline unsigned long __kprobes thumb_probe_pc(struct kprobe *p)
+{
+ return (unsigned long)p->addr - 1 + 4;
+}
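+
+/*
+ * For example, with a probe at the (hypothetical) Thumb address 0x8000,
+ * p->addr is 0x8001 because bit zero is set, and thumb_probe_pc() returns
+ * 0x8001 - 1 + 4 = 0x8004, the PC value the probed instruction itself
+ * would observe.
+ */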
+
+static void __kprobes
+t32_simulate_table_branch(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p);
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ unsigned long rnv = (rn == 15) ? pc : regs->uregs[rn];
+ unsigned long rmv = regs->uregs[rm];
+ unsigned int halfwords;
+
+ if (insn & 0x10) /* TBH */
+ halfwords = ((u16 *)rnv)[rmv];
+ else /* TBB */
+ halfwords = ((u8 *)rnv)[rmv];
+
+ regs->ARM_pc = pc + 2 * halfwords;
+}
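+
+/*
+ * For instance (hypothetical table contents): a TBB whose Rn points at the
+ * byte table {2, 5, 9} with Rm == 1 reads halfwords = 5, so the simulation
+ * sets ARM_pc to pc + 2 * 5.
+ */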
+
+static void __kprobes
+t32_simulate_mrs(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 8) & 0xf;
+ unsigned long mask = 0xf8ff03df; /* Mask out execution state */
+ regs->uregs[rd] = regs->ARM_cpsr & mask;
+}
+
+static void __kprobes
+t32_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p);
+
+ long offset = insn & 0x7ff; /* imm11 */
+ offset += (insn & 0x003f0000) >> 5; /* imm6 */
+ offset += (insn & 0x00002000) << 4; /* J1 */
+ offset += (insn & 0x00000800) << 7; /* J2 */
+ offset -= (insn & 0x04000000) >> 7; /* Apply sign bit */
+
+ regs->ARM_pc = pc + (offset * 2);
+}
+
+static enum kprobe_insn __kprobes
+t32_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ int cc = (insn >> 22) & 0xf;
+ asi->insn_check_cc = kprobe_condition_checks[cc];
+ asi->insn_handler = t32_simulate_cond_branch;
+ return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t32_simulate_branch(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p);
+
+ long offset = insn & 0x7ff; /* imm11 */
+ offset += (insn & 0x03ff0000) >> 5; /* imm10 */
+ offset += (insn & 0x00002000) << 9; /* J1 */
+ offset += (insn & 0x00000800) << 10; /* J2 */
+ if (insn & 0x04000000)
+ offset -= 0x00800000; /* Apply sign bit */
+ else
+ offset ^= 0x00600000; /* Invert J1 and J2 */
+
+ if (insn & (1 << 14)) {
+ /* BL or BLX */
+ regs->ARM_lr = (unsigned long)p->addr + 4;
+ if (!(insn & (1 << 12))) {
+ /* BLX so switch to ARM mode */
+ regs->ARM_cpsr &= ~PSR_T_BIT;
+ pc &= ~3;
+ }
+ }
+
+ regs->ARM_pc = pc + (offset * 2);
+}
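+
+/*
+ * The offset arithmetic above reassembles the encoded immediate
+ * S:I1:I2:imm10:imm11, where I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S):
+ * subtracting 0x00800000 applies the sign when S is set, and XORing with
+ * 0x00600000 inverts J1/J2 into I1/I2 when it is clear.
+ */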
+
+static void __kprobes
+t32_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long addr = thumb_probe_pc(p) & ~3;
+ int rt = (insn >> 12) & 0xf;
+ unsigned long rtv;
+
+ long offset = insn & 0xfff;
+ if (insn & 0x00800000)
+ addr += offset;
+ else
+ addr -= offset;
+
+ if (insn & 0x00400000) {
+ /* LDR */
+ rtv = *(unsigned long *)addr;
+ if (rt == 15) {
+ bx_write_pc(rtv, regs);
+ return;
+ }
+ } else if (insn & 0x00200000) {
+ /* LDRH */
+ if (insn & 0x01000000)
+ rtv = *(s16 *)addr;
+ else
+ rtv = *(u16 *)addr;
+ } else {
+ /* LDRB */
+ if (insn & 0x01000000)
+ rtv = *(s8 *)addr;
+ else
+ rtv = *(u8 *)addr;
+ }
+
+ regs->uregs[rt] = rtv;
+}
+
+static enum kprobe_insn __kprobes
+t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);
+
+ /* Fixup modified instruction to have halfwords in correct order...*/
+ insn = asi->insn[0];
+ ((u16 *)asi->insn)[0] = insn >> 16;
+ ((u16 *)asi->insn)[1] = insn & 0xffff;
+
+ return ret;
+}
+
+static void __kprobes
+t32_emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p) & ~3;
+ int rt1 = (insn >> 12) & 0xf;
+ int rt2 = (insn >> 8) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+
+ register unsigned long rt1v asm("r0") = regs->uregs[rt1];
+ register unsigned long rt2v asm("r1") = regs->uregs[rt2];
+ register unsigned long rnv asm("r2") = (rn == 15) ? pc
+ : regs->uregs[rn];
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rt1v), "=r" (rt2v), "=r" (rnv)
+ : "0" (rt1v), "1" (rt2v), "2" (rnv), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (rn != 15)
+ regs->uregs[rn] = rnv; /* Writeback base register */
+ regs->uregs[rt1] = rt1v;
+ regs->uregs[rt2] = rt2v;
+}
+
+static void __kprobes
+t32_emulate_ldrstr(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rt = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rtv asm("r0") = regs->uregs[rt];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rtv), "=r" (rnv)
+ : "0" (rtv), "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rn] = rnv; /* Writeback base register */
+ if (rt == 15) /* Can't be true for a STR as they aren't allowed */
+ bx_write_pc(rtv, regs);
+ else
+ regs->uregs[rt] = rtv;
+}
+
+static void __kprobes
+t32_emulate_rd8rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 8) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rdv asm("r1") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "blx %[fn] \n\t"
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdv), [cpsr] "=r" (cpsr)
+ : "0" (rdv), "r" (rnv), "r" (rmv),
+ "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+t32_emulate_rd8pc16_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p);
+ int rd = (insn >> 8) & 0xf;
+
+ register unsigned long rdv asm("r1") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = pc & ~3;
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rdv)
+ : "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+t32_emulate_rd8rn16_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 8) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+
+ register unsigned long rdv asm("r1") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rdv)
+ : "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+t32_emulate_rdlo12rdhi8rn16rm0_noflags(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rdlo = (insn >> 12) & 0xf;
+ int rdhi = (insn >> 8) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
+ register unsigned long rdhiv asm("r1") = regs->uregs[rdhi];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rdlov), "=r" (rdhiv)
+ : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
+ [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rdlo] = rdlov;
+ regs->uregs[rdhi] = rdhiv;
+}
+
+/* These emulation encodings are functionally equivalent... */
+#define t32_emulate_rd8rn16rm0ra12_noflags \
+ t32_emulate_rdlo12rdhi8rn16rm0_noflags
+
+static const union decode_item t32_table_1110_100x_x0xx[] = {
+ /* Load/store multiple instructions */
+
+ /* Rn is PC 1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfe4f0000, 0xe80f0000),
+
+ /* SRS 1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */
+ /* RFE 1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xffc00000, 0xe8000000),
+ /* SRS 1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */
+ /* RFE 1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xffc00000, 0xe9800000),
+
+ /* STM Rn, {...pc} 1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfe508000, 0xe8008000),
+ /* LDM Rn, {...lr,pc} 1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfe50c000, 0xe810c000),
+ /* LDM/STM Rn, {...sp} 1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */
+ DECODE_REJECT (0xfe402000, 0xe8002000),
+
+ /* STMIA 1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */
+ /* LDMIA 1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */
+ /* STMDB 1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */
+ /* LDMDB 1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_CUSTOM (0xfe400000, 0xe8000000, t32_decode_ldmstm),
+
+ DECODE_END
+};
+
+static const union decode_item t32_table_1110_100x_x1xx[] = {
+ /* Load/store dual, load/store exclusive, table branch */
+
+ /* STRD (immediate) 1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRD (immediate) 1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_OR (0xff600000, 0xe8600000),
+ /* STRD (immediate) 1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRD (immediate) 1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xff400000, 0xe9400000, t32_emulate_ldrdstrd,
+ REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)),
+
+ /* TBB 1110 1000 1101 xxxx xxxx xxxx 0000 xxxx */
+ /* TBH 1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */
+ DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, t32_simulate_table_branch,
+ REGS(NOSP, 0, 0, 0, NOSPPC)),
+
+ /* STREX 1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */
+ /* LDREX 1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */
+ /* STREXB 1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */
+ /* STREXH 1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */
+ /* STREXD 1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */
+ /* LDREXB 1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */
+ /* LDREXH 1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */
+ /* LDREXD 1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */
+ /* And unallocated instructions... */
+ DECODE_END
+};
+
+static const union decode_item t32_table_1110_101x[] = {
+ /* Data-processing (shifted register) */
+
+ /* TST 1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */
+ /* TEQ 1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */
+ DECODE_EMULATEX (0xff700f00, 0xea100f00, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOSPPC, 0, 0, 0, NOSPPC)),
+
+ /* CMN 1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */
+ DECODE_OR (0xfff00f00, 0xeb100f00),
+ /* CMP 1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */
+ DECODE_EMULATEX (0xfff00f00, 0xebb00f00, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOPC, 0, 0, 0, NOSPPC)),
+
+ /* MOV 1110 1010 010x 1111 xxxx xxxx xxxx xxxx */
+ /* MVN 1110 1010 011x 1111 xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xffcf0000, 0xea4f0000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(0, 0, NOSPPC, 0, NOSPPC)),
+
+ /* ??? 1110 1010 101x xxxx xxxx xxxx xxxx xxxx */
+ /* ??? 1110 1010 111x xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xffa00000, 0xeaa00000),
+ /* ??? 1110 1011 001x xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xffe00000, 0xeb200000),
+ /* ??? 1110 1011 100x xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xffe00000, 0xeb800000),
+ /* ??? 1110 1011 111x xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xffe00000, 0xebe00000),
+
+ /* ADD/SUB SP, SP, Rm, LSL #0..3 */
+ /* 1110 1011 x0xx 1101 x000 1101 xx00 xxxx */
+ DECODE_EMULATEX (0xff4f7f30, 0xeb0d0d00, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(SP, 0, SP, 0, NOSPPC)),
+
+ /* ADD/SUB SP, SP, Rm, shift */
+ /* 1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */
+ DECODE_REJECT (0xff4f0f00, 0xeb0d0d00),
+
+ /* ADD/SUB Rd, SP, Rm, shift */
+ /* 1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xff4f0000, 0xeb0d0000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(SP, 0, NOPC, 0, NOSPPC)),
+
+ /* AND 1110 1010 000x xxxx xxxx xxxx xxxx xxxx */
+ /* BIC 1110 1010 001x xxxx xxxx xxxx xxxx xxxx */
+ /* ORR 1110 1010 010x xxxx xxxx xxxx xxxx xxxx */
+ /* ORN 1110 1010 011x xxxx xxxx xxxx xxxx xxxx */
+ /* EOR 1110 1010 100x xxxx xxxx xxxx xxxx xxxx */
+ /* PKH 1110 1010 110x xxxx xxxx xxxx xxxx xxxx */
+ /* ADD 1110 1011 000x xxxx xxxx xxxx xxxx xxxx */
+ /* ADC 1110 1011 010x xxxx xxxx xxxx xxxx xxxx */
+ /* SBC 1110 1011 011x xxxx xxxx xxxx xxxx xxxx */
+ /* SUB 1110 1011 101x xxxx xxxx xxxx xxxx xxxx */
+ /* RSB 1110 1011 110x xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfe000000, 0xea000000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
+
+ DECODE_END
+};
+
+static const union decode_item t32_table_1111_0x0x___0[] = {
+ /* Data-processing (modified immediate) */
+
+ /* TST 1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */
+ /* TEQ 1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */
+ DECODE_EMULATEX (0xfb708f00, 0xf0100f00, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOSPPC, 0, 0, 0, 0)),
+
+ /* CMN 1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */
+ DECODE_OR (0xfbf08f00, 0xf1100f00),
+ /* CMP 1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */
+ DECODE_EMULATEX (0xfbf08f00, 0xf1b00f00, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOPC, 0, 0, 0, 0)),
+
+ /* MOV 1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */
+ /* MVN 1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfbcf8000, 0xf04f0000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(0, 0, NOSPPC, 0, 0)),
+
+ /* ??? 1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfbe08000, 0xf0a00000),
+ /* ??? 1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */
+ /* ??? 1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfbc08000, 0xf0c00000),
+ /* ??? 1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfbe08000, 0xf1200000),
+ /* ??? 1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfbe08000, 0xf1800000),
+ /* ??? 1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfbe08000, 0xf1e00000),
+
+ /* ADD Rd, SP, #imm 1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */
+ /* SUB Rd, SP, #imm 1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfb4f8000, 0xf10d0000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(SP, 0, NOPC, 0, 0)),
+
+ /* AND 1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */
+ /* BIC 1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */
+ /* ORR 1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */
+ /* ORN 1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */
+ /* EOR 1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */
+ /* ADD 1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */
+ /* ADC 1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */
+ /* SBC 1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */
+ /* SUB 1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */
+ /* RSB 1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfa008000, 0xf0000000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOSPPC, 0, NOSPPC, 0, 0)),
+
+ DECODE_END
+};
+
+static const union decode_item t32_table_1111_0x1x___0[] = {
+ /* Data-processing (plain binary immediate) */
+
+ /* ADDW Rd, PC, #imm 1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */
+ DECODE_OR (0xfbff8000, 0xf20f0000),
+ /* SUBW Rd, PC, #imm 1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfbff8000, 0xf2af0000, t32_emulate_rd8pc16_noflags,
+ REGS(PC, 0, NOSPPC, 0, 0)),
+
+ /* ADDW SP, SP, #imm 1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */
+ DECODE_OR (0xfbff8f00, 0xf20d0d00),
+ /* SUBW SP, SP, #imm 1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */
+ DECODE_EMULATEX (0xfbff8f00, 0xf2ad0d00, t32_emulate_rd8rn16_noflags,
+ REGS(SP, 0, SP, 0, 0)),
+
+ /* ADDW 1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_OR (0xfbf08000, 0xf2000000),
+ /* SUBW 1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfbf08000, 0xf2a00000, t32_emulate_rd8rn16_noflags,
+ REGS(NOPCX, 0, NOSPPC, 0, 0)),
+
+ /* MOVW 1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */
+ /* MOVT 1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfb708000, 0xf2400000, t32_emulate_rd8rn16_noflags,
+ REGS(0, 0, NOSPPC, 0, 0)),
+
+ /* SSAT16 1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */
+ /* SSAT 1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */
+ /* USAT16 1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */
+ /* USAT 1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfb508000, 0xf3000000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOSPPC, 0, NOSPPC, 0, 0)),
+
+ /* SBFX 1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */
+ /* UBFX 1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfb708000, 0xf3400000, t32_emulate_rd8rn16_noflags,
+ REGS(NOSPPC, 0, NOSPPC, 0, 0)),
+
+ /* BFC 1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfbff8000, 0xf36f0000, t32_emulate_rd8rn16_noflags,
+ REGS(0, 0, NOSPPC, 0, 0)),
+
+ /* BFI 1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfbf08000, 0xf3600000, t32_emulate_rd8rn16_noflags,
+ REGS(NOSPPCX, 0, NOSPPC, 0, 0)),
+
+ DECODE_END
+};
+
+static const union decode_item t32_table_1111_0xxx___1[] = {
+ /* Branches and miscellaneous control */
+
+ /* YIELD 1111 0011 1010 xxxx 10x0 x000 0000 0001 */
+ DECODE_OR (0xfff0d7ff, 0xf3a08001),
+ /* SEV 1111 0011 1010 xxxx 10x0 x000 0000 0100 */
+ DECODE_EMULATE (0xfff0d7ff, 0xf3a08004, kprobe_emulate_none),
+ /* NOP 1111 0011 1010 xxxx 10x0 x000 0000 0000 */
+ /* WFE 1111 0011 1010 xxxx 10x0 x000 0000 0010 */
+ /* WFI 1111 0011 1010 xxxx 10x0 x000 0000 0011 */
+ DECODE_SIMULATE (0xfff0d7fc, 0xf3a08000, kprobe_simulate_nop),
+
+ /* MRS Rd, CPSR 1111 0011 1110 xxxx 10x0 xxxx xxxx xxxx */
+ DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, t32_simulate_mrs,
+ REGS(0, 0, NOSPPC, 0, 0)),
+
+ /*
+ * Unsupported instructions
+ * 1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx
+ *
+ * MSR 1111 0011 100x xxxx 10x0 xxxx xxxx xxxx
+ * DBG hint 1111 0011 1010 xxxx 10x0 x000 1111 xxxx
+ * Unallocated hints 1111 0011 1010 xxxx 10x0 x000 xxxx xxxx
+ * CPS 1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx
+ * CLREX/DSB/DMB/ISB 1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx
+ * BXJ 1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx
+ * SUBS PC,LR,#<imm8> 1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx
+ * MRS Rd, SPSR 1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx
+ * SMC 1111 0111 1111 xxxx 1000 xxxx xxxx xxxx
+ * UNDEFINED 1111 0111 1111 xxxx 1010 xxxx xxxx xxxx
+ * ??? 1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx
+ */
+ DECODE_REJECT (0xfb80d000, 0xf3808000),
+
+ /* Bcc 1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */
+ DECODE_CUSTOM (0xf800d000, 0xf0008000, t32_decode_cond_branch),
+
+ /* BLX 1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */
+ DECODE_OR (0xf800d001, 0xf000c000),
+ /* B 1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */
+ /* BL 1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */
+ DECODE_SIMULATE (0xf8009000, 0xf0009000, t32_simulate_branch),
+
+ DECODE_END
+};
+
+static const union decode_item t32_table_1111_100x_x0x1__1111[] = {
+ /* Memory hints */
+
+ /* PLD (literal) 1111 1000 x001 1111 1111 xxxx xxxx xxxx */
+ /* PLI (literal) 1111 1001 x001 1111 1111 xxxx xxxx xxxx */
+ DECODE_SIMULATE (0xfe7ff000, 0xf81ff000, kprobe_simulate_nop),
+
+ /* PLD{W} (immediate) 1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */
+ DECODE_OR (0xffd0f000, 0xf890f000),
+ /* PLD{W} (immediate) 1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */
+ DECODE_OR (0xffd0ff00, 0xf810fc00),
+ /* PLI (immediate) 1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */
+ DECODE_OR (0xfff0f000, 0xf990f000),
+ /* PLI (immediate) 1111 1001 0001 xxxx 1111 1100 xxxx xxxx */
+ DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, kprobe_simulate_nop,
+ REGS(NOPCX, 0, 0, 0, 0)),
+
+ /* PLD{W} (register) 1111 1000 00x1 xxxx 1111 0000 00xx xxxx */
+ DECODE_OR (0xffd0ffc0, 0xf810f000),
+ /* PLI (register) 1111 1001 0001 xxxx 1111 0000 00xx xxxx */
+ DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, kprobe_simulate_nop,
+ REGS(NOPCX, 0, 0, 0, NOSPPC)),
+
+ /* Other unallocated instructions... */
+ DECODE_END
+};
+
+static const union decode_item t32_table_1111_100x[] = {
+ /* Store/Load single data item */
+
+ /* ??? 1111 100x x11x xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfe600000, 0xf8600000),
+
+ /* ??? 1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xfff00000, 0xf9500000),
+
+ /* ??? 1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */
+ DECODE_REJECT (0xfe800d00, 0xf8000800),
+
+ /* STRBT 1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */
+ /* STRHT 1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */
+ /* STRT 1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */
+ /* LDRBT 1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */
+ /* LDRSBT 1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */
+ /* LDRHT 1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */
+ /* LDRSHT 1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */
+ /* LDRT 1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */
+ DECODE_REJECT (0xfe800f00, 0xf8000e00),
+
+ /* STR{,B,H} Rn,[PC...] 1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */
+ DECODE_REJECT (0xff1f0000, 0xf80f0000),
+
+ /* STR{,B,H} PC,[Rn...] 1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */
+ DECODE_REJECT (0xff10f000, 0xf800f000),
+
+ /* LDR (literal) 1111 1000 x101 1111 xxxx xxxx xxxx xxxx */
+ DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, t32_simulate_ldr_literal,
+ REGS(PC, ANY, 0, 0, 0)),
+
+ /* STR (immediate) 1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */
+ /* LDR (immediate) 1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */
+ DECODE_OR (0xffe00800, 0xf8400800),
+ /* STR (immediate) 1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */
+ /* LDR (immediate) 1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xffe00000, 0xf8c00000, t32_emulate_ldrstr,
+ REGS(NOPCX, ANY, 0, 0, 0)),
+
+ /* STR (register) 1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
+ /* LDR (register) 1111 1000 0101 xxxx xxxx 0000 00xx xxxx */
+ DECODE_EMULATEX (0xffe00fc0, 0xf8400000, t32_emulate_ldrstr,
+ REGS(NOPCX, ANY, 0, 0, NOSPPC)),
+
+ /* LDRB (literal) 1111 1000 x001 1111 xxxx xxxx xxxx xxxx */
+ /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
+ /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
+ /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
+ REGS(PC, NOSPPCX, 0, 0, 0)),
+
+ /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
+ /* STRH (immediate) 1111 1000 0010 xxxx xxxx 1xxx xxxx xxxx */
+ /* LDRB (immediate) 1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */
+ /* LDRSB (immediate) 1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */
+ /* LDRH (immediate) 1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */
+ /* LDRSH (immediate) 1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */
+ DECODE_OR (0xfec00800, 0xf8000800),
+ /* STRB (immediate) 1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */
+ /* STRH (immediate) 1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRB (immediate) 1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRSB (immediate) 1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRH (immediate) 1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRSH (immediate) 1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */
+ DECODE_EMULATEX (0xfec00000, 0xf8800000, t32_emulate_ldrstr,
+ REGS(NOPCX, NOSPPCX, 0, 0, 0)),
+
+ /* STRB (register) 1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
+ /* STRH (register) 1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
+ /* LDRB (register) 1111 1000 0001 xxxx xxxx 0000 00xx xxxx */
+ /* LDRSB (register) 1111 1001 0001 xxxx xxxx 0000 00xx xxxx */
+ /* LDRH (register) 1111 1000 0011 xxxx xxxx 0000 00xx xxxx */
+ /* LDRSH (register) 1111 1001 0011 xxxx xxxx 0000 00xx xxxx */
+ DECODE_EMULATEX (0xfe800fc0, 0xf8000000, t32_emulate_ldrstr,
+ REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)),
+
+ /* Other unallocated instructions... */
+ DECODE_END
+};
+
+static const union decode_item t32_table_1111_1010___1111[] = {
+ /* Data-processing (register) */
+
+ /* ??? 1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */
+ DECODE_REJECT (0xffe0f080, 0xfa60f080),
+
+ /* SXTH 1111 1010 0000 1111 1111 xxxx 1xxx xxxx */
+ /* UXTH 1111 1010 0001 1111 1111 xxxx 1xxx xxxx */
+ /* SXTB16 1111 1010 0010 1111 1111 xxxx 1xxx xxxx */
+ /* UXTB16 1111 1010 0011 1111 1111 xxxx 1xxx xxxx */
+ /* SXTB 1111 1010 0100 1111 1111 xxxx 1xxx xxxx */
+ /* UXTB 1111 1010 0101 1111 1111 xxxx 1xxx xxxx */
+ DECODE_EMULATEX (0xff8ff080, 0xfa0ff080, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(0, 0, NOSPPC, 0, NOSPPC)),
+
+
+ /* ??? 1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */
+ DECODE_REJECT (0xff80f0b0, 0xfa80f030),
+ /* ??? 1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */
+ DECODE_REJECT (0xffb0f080, 0xfab0f000),
+
+ /* SADD16 1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */
+ /* SASX 1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */
+ /* SSAX 1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */
+ /* SSUB16 1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */
+ /* SADD8 1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */
+ /* SSUB8 1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */
+
+ /* QADD16 1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */
+ /* QASX 1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */
+ /* QSAX 1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */
+ /* QSUB16 1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */
+ /* QADD8 1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */
+ /* QSUB8 1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */
+
+ /* SHADD16 1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */
+ /* SHASX 1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */
+ /* SHSAX 1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */
+ /* SHSUB16 1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */
+ /* SHADD8 1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */
+ /* SHSUB8 1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */
+
+ /* UADD16 1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */
+ /* UASX 1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */
+ /* USAX 1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */
+ /* USUB16 1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */
+ /* UADD8 1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */
+ /* USUB8 1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */
+
+ /* UQADD16 1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */
+ /* UQASX 1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */
+ /* UQSAX 1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */
+ /* UQSUB16 1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */
+ /* UQADD8 1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */
+ /* UQSUB8 1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */
+
+ /* UHADD16 1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */
+ /* UHASX 1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */
+ /* UHSAX 1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */
+ /* UHSUB16 1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */
+ /* UHADD8 1111 1010 1000 xxxx 1111 xxxx 0110 xxxx */
+ /* UHSUB8 1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */
+ DECODE_OR (0xff80f080, 0xfa80f000),
+
+ /* SXTAH 1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */
+ /* UXTAH 1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */
+ /* SXTAB16 1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */
+ /* UXTAB16 1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */
+ /* SXTAB 1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */
+ /* UXTAB 1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */
+ DECODE_OR (0xff80f080, 0xfa00f080),
+
+ /* QADD 1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */
+ /* QDADD 1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */
+ /* QSUB 1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */
+ /* QDSUB 1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */
+ DECODE_OR (0xfff0f0c0, 0xfa80f080),
+
+ /* SEL 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
+ DECODE_OR (0xfff0f0f0, 0xfaa0f080),
+
+ /* LSL 1111 1010 000x xxxx 1111 xxxx 0000 xxxx */
+ /* LSR 1111 1010 001x xxxx 1111 xxxx 0000 xxxx */
+ /* ASR 1111 1010 010x xxxx 1111 xxxx 0000 xxxx */
+ /* ROR 1111 1010 011x xxxx 1111 xxxx 0000 xxxx */
+ DECODE_EMULATEX (0xff80f0f0, 0xfa00f000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
+
+ /* CLZ 1111 1010 1011 xxxx 1111 xxxx 1000 xxxx */
+ DECODE_OR (0xfff0f0f0, 0xfab0f080),
+
+ /* REV 1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */
+ /* REV16 1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */
+ /* RBIT 1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */
+ /* REVSH 1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */
+ DECODE_EMULATEX (0xfff0f0c0, 0xfa90f080, t32_emulate_rd8rn16_noflags,
+ REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)),
+
+ /* Other unallocated instructions... */
+ DECODE_END
+};
+
+static const union decode_item t32_table_1111_1011_0[] = {
+ /* Multiply, multiply accumulate, and absolute difference */
+
+ /* ??? 1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */
+ DECODE_REJECT (0xfff0f0f0, 0xfb00f010),
+ /* ??? 1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */
+ DECODE_REJECT (0xfff0f0f0, 0xfb70f010),
+
+ /* SMULxy 1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */
+ DECODE_OR (0xfff0f0c0, 0xfb10f000),
+ /* MUL 1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */
+ /* SMUAD{X} 1111 1011 0010 xxxx 1111 xxxx 000x xxxx */
+ /* SMULWy 1111 1011 0011 xxxx 1111 xxxx 000x xxxx */
+ /* SMUSD{X} 1111 1011 0100 xxxx 1111 xxxx 000x xxxx */
+ /* SMMUL{R} 1111 1011 0101 xxxx 1111 xxxx 000x xxxx */
+ /* USAD8 1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */
+ DECODE_EMULATEX (0xff80f0e0, 0xfb00f000, t32_emulate_rd8rn16rm0_rwflags,
+ REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
+
+ /* ??? 1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */
+ DECODE_REJECT (0xfff000f0, 0xfb700010),
+
+ /* SMLAxy 1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */
+ DECODE_OR (0xfff000c0, 0xfb100000),
+ /* MLA 1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */
+ /* MLS 1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */
+ /* SMLAD{X} 1111 1011 0010 xxxx xxxx xxxx 000x xxxx */
+ /* SMLAWy 1111 1011 0011 xxxx xxxx xxxx 000x xxxx */
+ /* SMLSD{X} 1111 1011 0100 xxxx xxxx xxxx 000x xxxx */
+ /* SMMLA{R} 1111 1011 0101 xxxx xxxx xxxx 000x xxxx */
+ /* SMMLS{R} 1111 1011 0110 xxxx xxxx xxxx 000x xxxx */
+ /* USADA8 1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */
+ DECODE_EMULATEX (0xff8000c0, 0xfb000000, t32_emulate_rd8rn16rm0ra12_noflags,
+ REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)),
+
+ /* Other unallocated instructions... */
+ DECODE_END
+};
+
+static const union decode_item t32_table_1111_1011_1[] = {
+ /* Long multiply, long multiply accumulate, and divide */
+
+ /* UMAAL 1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */
+ DECODE_OR (0xfff000f0, 0xfbe00060),
+ /* SMLALxy 1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */
+ DECODE_OR (0xfff000c0, 0xfbc00080),
+ /* SMLALD{X} 1111 1011 1100 xxxx xxxx xxxx 110x xxxx */
+ /* SMLSLD{X} 1111 1011 1101 xxxx xxxx xxxx 110x xxxx */
+ DECODE_OR (0xffe000e0, 0xfbc000c0),
+ /* SMULL 1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */
+ /* UMULL 1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */
+ /* SMLAL 1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */
+ /* UMLAL 1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */
+ DECODE_EMULATEX (0xff9000f0, 0xfb800000, t32_emulate_rdlo12rdhi8rn16rm0_noflags,
+ REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)),
+
+ /* SDIV 1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */
+ /* UDIV 1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */
+ /* Other unallocated instructions... */
+ DECODE_END
+};
+
+const union decode_item kprobe_decode_thumb32_table[] = {
+
+ /*
+ * Load/store multiple instructions
+ * 1110 100x x0xx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx),
+
+ /*
+ * Load/store dual, load/store exclusive, table branch
+ * 1110 100x x1xx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx),
+
+ /*
+ * Data-processing (shifted register)
+ * 1110 101x xxxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xfe000000, 0xea000000, t32_table_1110_101x),
+
+ /*
+ * Coprocessor instructions
+ * 1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_REJECT (0xfc000000, 0xec000000),
+
+ /*
+ * Data-processing (modified immediate)
+ * 1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xfa008000, 0xf0000000, t32_table_1111_0x0x___0),
+
+ /*
+ * Data-processing (plain binary immediate)
+ * 1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xfa008000, 0xf2000000, t32_table_1111_0x1x___0),
+
+ /*
+ * Branches and miscellaneous control
+ * 1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xf8008000, 0xf0008000, t32_table_1111_0xxx___1),
+
+ /*
+ * Advanced SIMD element or structure load/store instructions
+ * 1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_REJECT (0xff100000, 0xf9000000),
+
+ /*
+ * Memory hints
+ * 1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111),
+
+ /*
+ * Store single data item
+ * 1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx
+ * Load single data items
+ * 1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xfe000000, 0xf8000000, t32_table_1111_100x),
+
+ /*
+ * Data-processing (register)
+ * 1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xff00f000, 0xfa00f000, t32_table_1111_1010___1111),
+
+ /*
+ * Multiply, multiply accumulate, and absolute difference
+ * 1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xff800000, 0xfb000000, t32_table_1111_1011_0),
+
+ /*
+ * Long multiply, long multiply accumulate, and divide
+ * 1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xff800000, 0xfb800000, t32_table_1111_1011_1),
+
+ /*
+ * Coprocessor instructions
+ * 1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_END
+};
+
+static void __kprobes
+t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p);
+ int rm = (insn >> 3) & 0xf;
+ unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm];
+
+ if (insn & (1 << 7)) /* BLX ? */
+ regs->ARM_lr = (unsigned long)p->addr + 2;
+
+ bx_write_pc(rmv, regs);
+}
+
+static void __kprobes
+t16_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long* base = (unsigned long *)(thumb_probe_pc(p) & ~3);
+ long index = insn & 0xff;
+ int rt = (insn >> 8) & 0x7;
+ regs->uregs[rt] = base[index];
+}
+
+static void __kprobes
+t16_simulate_ldrstr_sp_relative(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long* base = (unsigned long *)regs->ARM_sp;
+ long index = insn & 0xff;
+ int rt = (insn >> 8) & 0x7;
+ if (insn & 0x800) /* LDR */
+ regs->uregs[rt] = base[index];
+ else /* STR */
+ base[index] = regs->uregs[rt];
+}
+
+static void __kprobes
+t16_simulate_reladr(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long base = (insn & 0x800) ? regs->ARM_sp
+ : (thumb_probe_pc(p) & ~3);
+ long offset = insn & 0xff;
+ int rt = (insn >> 8) & 0x7;
+ regs->uregs[rt] = base + offset * 4;
+}
+
+static void __kprobes
+t16_simulate_add_sp_imm(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ long imm = insn & 0x7f;
+ if (insn & 0x80) /* SUB */
+ regs->ARM_sp -= imm * 4;
+ else /* ADD */
+ regs->ARM_sp += imm * 4;
+}
+
+static void __kprobes
+t16_simulate_cbz(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ int rn = insn & 0x7;
+ kprobe_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn;
+ if (nonzero & 0x800) {
+ long i = insn & 0x200;
+ long imm5 = insn & 0xf8;
+ unsigned long pc = thumb_probe_pc(p);
+ regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2);
+ }
+}
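+
+/*
+ * Worked example (hypothetical encoding): for CBNZ with bit 9 set and
+ * imm5 all ones, the offset is (0x200 >> 3) + (0xf8 >> 2) = 64 + 62 = 126,
+ * the maximum forward branch of 126 bytes from the probe's PC value.
+ */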
+
+static void __kprobes
+t16_simulate_it(struct kprobe *p, struct pt_regs *regs)
+{
+ /*
+ * The 8 IT state bits are split into two parts in CPSR:
+ * ITSTATE<1:0> are in CPSR<26:25>
+ * ITSTATE<7:2> are in CPSR<15:10>
+ * The new IT state is in the lower byte of insn.
+ */
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long cpsr = regs->ARM_cpsr;
+ cpsr &= ~PSR_IT_MASK;
+ cpsr |= (insn & 0xfc) << 8;
+ cpsr |= (insn & 0x03) << 25;
+ regs->ARM_cpsr = cpsr;
+}
+
+static void __kprobes
+t16_singlestep_it(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->ARM_pc += 2;
+ t16_simulate_it(p, regs);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_it(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ asi->insn_singlestep = t16_singlestep_it;
+ return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t16_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p);
+ long offset = insn & 0x7f;
+ offset -= insn & 0x80; /* Apply sign bit */
+ regs->ARM_pc = pc + (offset * 2);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ int cc = (insn >> 8) & 0xf;
+ asi->insn_check_cc = kprobe_condition_checks[cc];
+ asi->insn_handler = t16_simulate_cond_branch;
+ return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t16_simulate_branch(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p);
+ long offset = insn & 0x3ff;
+ offset -= insn & 0x400; /* Apply sign bit */
+ regs->ARM_pc = pc + (offset * 2);
+}
+
+static unsigned long __kprobes
+t16_emulate_loregs(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long oldcpsr = regs->ARM_cpsr;
+ unsigned long newcpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[oldcpsr] \n\t"
+ "ldmia %[regs], {r0-r7} \n\t"
+ "blx %[fn] \n\t"
+ "stmia %[regs], {r0-r7} \n\t"
+ "mrs %[newcpsr], cpsr \n\t"
+ : [newcpsr] "=r" (newcpsr)
+ : [oldcpsr] "r" (oldcpsr), [regs] "r" (regs),
+ [fn] "r" (p->ainsn.insn_fn)
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "lr", "memory", "cc"
+ );
+
+ return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK);
+}
+
+static void __kprobes
+t16_emulate_loregs_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->ARM_cpsr = t16_emulate_loregs(p, regs);
+}
+
+static void __kprobes
+t16_emulate_loregs_noitrwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long cpsr = t16_emulate_loregs(p, regs);
+ if (!in_it_block(cpsr))
+ regs->ARM_cpsr = cpsr;
+}
+
+static void __kprobes
+t16_emulate_hiregs(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ unsigned long pc = thumb_probe_pc(p);
+ int rdn = (insn & 0x7) | ((insn & 0x80) >> 4);
+ int rm = (insn >> 3) & 0xf;
+
+ register unsigned long rdnv asm("r1");
+ register unsigned long rmv asm("r0");
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ rdnv = (rdn == 15) ? pc : regs->uregs[rdn];
+ rmv = (rm == 15) ? pc : regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "blx %[fn] \n\t"
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdnv), [cpsr] "=r" (cpsr)
+ : "0" (rdnv), "r" (rmv), "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (rdn == 15)
+ rdnv &= ~1;
+
+ regs->uregs[rdn] = rdnv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ insn &= ~0x00ff;
+ insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
+ ((u16 *)asi->insn)[0] = insn;
+ asi->insn_handler = t16_emulate_hiregs;
+ return INSN_GOOD;
+}
+
+static void __kprobes
+t16_emulate_push(struct kprobe *p, struct pt_regs *regs)
+{
+ __asm__ __volatile__ (
+ "ldr r9, [%[regs], #13*4] \n\t"
+ "ldr r8, [%[regs], #14*4] \n\t"
+ "ldmia %[regs], {r0-r7} \n\t"
+ "blx %[fn] \n\t"
+ "str r9, [%[regs], #13*4] \n\t"
+ :
+ : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
+ "lr", "memory", "cc"
+ );
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /*
+ * To simulate a PUSH we use a Thumb-2 "STMDB R9!, {registers}"
+ * and call it with R9=SP and LR in the register list represented
+ * by R8.
+ */
+ ((u16 *)asi->insn)[0] = 0xe929; /* 1st half STMDB R9!,{} */
+ ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
+ asi->insn_handler = t16_emulate_push;
+ return INSN_GOOD;
+}
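+
+/*
+ * For example (hypothetical operands): "PUSH {r0-r2, lr}" has low nine bits
+ * 0x107, so the second halfword written above encodes the register list
+ * {r0-r2, r8}; t16_emulate_push() preloads r8 with LR and r9 with SP, so
+ * the STMDB stores the same values a real PUSH would.
+ */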
+
+static void __kprobes
+t16_emulate_pop_nopc(struct kprobe *p, struct pt_regs *regs)
+{
+ __asm__ __volatile__ (
+ "ldr r9, [%[regs], #13*4] \n\t"
+ "ldmia %[regs], {r0-r7} \n\t"
+ "blx %[fn] \n\t"
+ "stmia %[regs], {r0-r7} \n\t"
+ "str r9, [%[regs], #13*4] \n\t"
+ :
+ : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
+ "lr", "memory", "cc"
+ );
+}
+
+static void __kprobes
+t16_emulate_pop_pc(struct kprobe *p, struct pt_regs *regs)
+{
+ register unsigned long pc asm("r8");
+
+ __asm__ __volatile__ (
+ "ldr r9, [%[regs], #13*4] \n\t"
+ "ldmia %[regs], {r0-r7} \n\t"
+ "blx %[fn] \n\t"
+ "stmia %[regs], {r0-r7} \n\t"
+ "str r9, [%[regs], #13*4] \n\t"
+ : "=r" (pc)
+ : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
+ "lr", "memory", "cc"
+ );
+
+ bx_write_pc(pc, regs);
+}
+
+static enum kprobe_insn __kprobes
+t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /*
+ * To simulate a POP we use a Thumb-2 "LDMIA R9!, {registers}"
+ * and call it with R9=SP and PC in the register list represented
+ * by R8.
+ */
+ ((u16 *)asi->insn)[0] = 0xe8b9; /* 1st half LDMIA R9!,{} */
+ ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
+ asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
+ : t16_emulate_pop_nopc;
+ return INSN_GOOD;
+}
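+
+/*
+ * As with PUSH above, a PC bit in the original POP register list becomes
+ * bit 8 (r8) of the rewritten list; t16_emulate_pop_pc() then transfers
+ * the value loaded into r8 to the PC via bx_write_pc().
+ */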
+
+static const union decode_item t16_table_1011[] = {
+ /* Miscellaneous 16-bit instructions */
+
+ /* ADD (SP plus immediate) 1011 0000 0xxx xxxx */
+ /* SUB (SP minus immediate) 1011 0000 1xxx xxxx */
+ DECODE_SIMULATE (0xff00, 0xb000, t16_simulate_add_sp_imm),
+
+ /* CBZ 1011 00x1 xxxx xxxx */
+ /* CBNZ 1011 10x1 xxxx xxxx */
+ DECODE_SIMULATE (0xf500, 0xb100, t16_simulate_cbz),
+
+ /* SXTH 1011 0010 00xx xxxx */
+ /* SXTB 1011 0010 01xx xxxx */
+ /* UXTH 1011 0010 10xx xxxx */
+ /* UXTB 1011 0010 11xx xxxx */
+ /* REV 1011 1010 00xx xxxx */
+ /* REV16 1011 1010 01xx xxxx */
+ /* ??? 1011 1010 10xx xxxx */
+ /* REVSH 1011 1010 11xx xxxx */
+ DECODE_REJECT (0xffc0, 0xba80),
+ DECODE_EMULATE (0xf500, 0xb000, t16_emulate_loregs_rwflags),
+
+ /* PUSH 1011 010x xxxx xxxx */
+ DECODE_CUSTOM (0xfe00, 0xb400, t16_decode_push),
+ /* POP 1011 110x xxxx xxxx */
+ DECODE_CUSTOM (0xfe00, 0xbc00, t16_decode_pop),
+
+ /*
+ * If-Then, and hints
+ * 1011 1111 xxxx xxxx
+ */
+
+ /* YIELD 1011 1111 0001 0000 */
+ DECODE_OR (0xffff, 0xbf10),
+ /* SEV 1011 1111 0100 0000 */
+ DECODE_EMULATE (0xffff, 0xbf40, kprobe_emulate_none),
+ /* NOP 1011 1111 0000 0000 */
+ /* WFE 1011 1111 0010 0000 */
+ /* WFI 1011 1111 0011 0000 */
+ DECODE_SIMULATE (0xffcf, 0xbf00, kprobe_simulate_nop),
+ /* Unassigned hints 1011 1111 xxxx 0000 */
+ DECODE_REJECT (0xff0f, 0xbf00),
+ /* IT 1011 1111 xxxx xxxx */
+ DECODE_CUSTOM (0xff00, 0xbf00, t16_decode_it),
+
+ /* SETEND 1011 0110 010x xxxx */
+ /* CPS 1011 0110 011x xxxx */
+ /* BKPT 1011 1110 xxxx xxxx */
+ /* And unallocated instructions... */
+ DECODE_END
+};
+
+const union decode_item kprobe_decode_thumb16_table[] = {
+
+ /*
+ * Shift (immediate), add, subtract, move, and compare
+ * 00xx xxxx xxxx xxxx
+ */
+
+ /* CMP (immediate) 0010 1xxx xxxx xxxx */
+ DECODE_EMULATE (0xf800, 0x2800, t16_emulate_loregs_rwflags),
+
+ /* ADD (register) 0001 100x xxxx xxxx */
+ /* SUB (register) 0001 101x xxxx xxxx */
+ /* LSL (immediate) 0000 0xxx xxxx xxxx */
+ /* LSR (immediate) 0000 1xxx xxxx xxxx */
+ /* ASR (immediate) 0001 0xxx xxxx xxxx */
+ /* ADD (immediate, Thumb) 0001 110x xxxx xxxx */
+ /* SUB (immediate, Thumb) 0001 111x xxxx xxxx */
+ /* MOV (immediate) 0010 0xxx xxxx xxxx */
+ /* ADD (immediate, Thumb) 0011 0xxx xxxx xxxx */
+ /* SUB (immediate, Thumb) 0011 1xxx xxxx xxxx */
+ DECODE_EMULATE (0xc000, 0x0000, t16_emulate_loregs_noitrwflags),
+
+ /*
+ * 16-bit Thumb data-processing instructions
+ * 0100 00xx xxxx xxxx
+ */
+
+ /* TST (register) 0100 0010 00xx xxxx */
+ DECODE_EMULATE (0xffc0, 0x4200, t16_emulate_loregs_rwflags),
+ /* CMP (register) 0100 0010 10xx xxxx */
+ /* CMN (register) 0100 0010 11xx xxxx */
+ DECODE_EMULATE (0xff80, 0x4280, t16_emulate_loregs_rwflags),
+ /* AND (register) 0100 0000 00xx xxxx */
+ /* EOR (register) 0100 0000 01xx xxxx */
+ /* LSL (register) 0100 0000 10xx xxxx */
+ /* LSR (register) 0100 0000 11xx xxxx */
+ /* ASR (register) 0100 0001 00xx xxxx */
+ /* ADC (register) 0100 0001 01xx xxxx */
+ /* SBC (register) 0100 0001 10xx xxxx */
+ /* ROR (register) 0100 0001 11xx xxxx */
+ /* RSB (immediate) 0100 0010 01xx xxxx */
+ /* ORR (register) 0100 0011 00xx xxxx */
+ /* MUL 0100 0011 01xx xxxx */
+ /* BIC (register) 0100 0011 10xx xxxx */
+ /* MVN (register) 0100 0011 11xx xxxx */
+ DECODE_EMULATE (0xfc00, 0x4000, t16_emulate_loregs_noitrwflags),
+
+ /*
+ * Special data instructions and branch and exchange
+ * 0100 01xx xxxx xxxx
+ */
+
+ /* BLX pc 0100 0111 1111 1xxx */
+ DECODE_REJECT (0xfff8, 0x47f8),
+
+ /* BX (register) 0100 0111 0xxx xxxx */
+ /* BLX (register) 0100 0111 1xxx xxxx */
+ DECODE_SIMULATE (0xff00, 0x4700, t16_simulate_bxblx),
+
+ /* ADD pc, pc 0100 0100 1111 1111 */
+ DECODE_REJECT (0xffff, 0x44ff),
+
+ /* ADD (register) 0100 0100 xxxx xxxx */
+ /* CMP (register) 0100 0101 xxxx xxxx */
+ /* MOV (register) 0100 0110 xxxx xxxx */
+ DECODE_CUSTOM (0xfc00, 0x4400, t16_decode_hiregs),
+
+ /*
+ * Load from Literal Pool
+ * LDR (literal) 0100 1xxx xxxx xxxx
+ */
+ DECODE_SIMULATE (0xf800, 0x4800, t16_simulate_ldr_literal),
+
+ /*
+ * 16-bit Thumb Load/store instructions
+ * 0101 xxxx xxxx xxxx
+ * 011x xxxx xxxx xxxx
+ * 100x xxxx xxxx xxxx
+ */
+
+ /* STR (register) 0101 000x xxxx xxxx */
+ /* STRH (register) 0101 001x xxxx xxxx */
+ /* STRB (register) 0101 010x xxxx xxxx */
+ /* LDRSB (register) 0101 011x xxxx xxxx */
+ /* LDR (register) 0101 100x xxxx xxxx */
+ /* LDRH (register) 0101 101x xxxx xxxx */
+ /* LDRB (register) 0101 110x xxxx xxxx */
+ /* LDRSH (register) 0101 111x xxxx xxxx */
+ /* STR (immediate, Thumb) 0110 0xxx xxxx xxxx */
+ /* LDR (immediate, Thumb) 0110 1xxx xxxx xxxx */
+ /* STRB (immediate, Thumb) 0111 0xxx xxxx xxxx */
+ /* LDRB (immediate, Thumb) 0111 1xxx xxxx xxxx */
+ DECODE_EMULATE (0xc000, 0x4000, t16_emulate_loregs_rwflags),
+ /* STRH (immediate, Thumb) 1000 0xxx xxxx xxxx */
+ /* LDRH (immediate, Thumb) 1000 1xxx xxxx xxxx */
+ DECODE_EMULATE (0xf000, 0x8000, t16_emulate_loregs_rwflags),
+ /* STR (immediate, Thumb) 1001 0xxx xxxx xxxx */
+ /* LDR (immediate, Thumb) 1001 1xxx xxxx xxxx */
+ DECODE_SIMULATE (0xf000, 0x9000, t16_simulate_ldrstr_sp_relative),
+
+ /*
+ * Generate PC-/SP-relative address
+ * ADR (literal) 1010 0xxx xxxx xxxx
+ * ADD (SP plus immediate) 1010 1xxx xxxx xxxx
+ */
+ DECODE_SIMULATE (0xf000, 0xa000, t16_simulate_reladr),
+
+ /*
+ * Miscellaneous 16-bit instructions
+ * 1011 xxxx xxxx xxxx
+ */
+ DECODE_TABLE (0xf000, 0xb000, t16_table_1011),
+
+ /* STM 1100 0xxx xxxx xxxx */
+ /* LDM 1100 1xxx xxxx xxxx */
+ DECODE_EMULATE (0xf000, 0xc000, t16_emulate_loregs_rwflags),
+
+ /*
+ * Conditional branch, and Supervisor Call
+ */
+
+ /* Permanently UNDEFINED 1101 1110 xxxx xxxx */
+ /* SVC 1101 1111 xxxx xxxx */
+ DECODE_REJECT (0xfe00, 0xde00),
+
+ /* Conditional branch 1101 xxxx xxxx xxxx */
+ DECODE_CUSTOM (0xf000, 0xd000, t16_decode_cond_branch),
+
+ /*
+ * Unconditional branch
+ * B 1110 0xxx xxxx xxxx
+ */
+ DECODE_SIMULATE (0xf800, 0xe000, t16_simulate_branch),
+
+ DECODE_END
+};
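
The tables above are scanned in order and the first mask/value match wins. A small sketch of how one probed instruction is routed, assuming the standard Thumb encoding 0xb510 for "PUSH {r4, lr}": it falls through the earlier top-level entries, matches the DECODE_TABLE entry for the 1011 group, and then matches the PUSH line in t16_table_1011, so t16_decode_push completes the decode.

	#include <assert.h>

	int main(void)
	{
		unsigned insn = 0xb510;			/* PUSH {r4, lr} */

		assert((insn & 0xf000) == 0xb000);	/* -> t16_table_1011 */
		assert((insn & 0xf500) != 0xb100);	/* not CBZ/CBNZ */
		assert((insn & 0xfe00) == 0xb400);	/* PUSH -> t16_decode_push */
		return 0;
	}
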
+
+static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
+{
+ if (unlikely(in_it_block(cpsr)))
+ return kprobe_condition_checks[current_cond(cpsr)](cpsr);
+ return true;
+}
+
+static void __kprobes thumb16_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->ARM_pc += 2;
+ p->ainsn.insn_handler(p, regs);
+ regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+}
+
+static void __kprobes thumb32_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->ARM_pc += 4;
+ p->ainsn.insn_handler(p, regs);
+ regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+}
+
+enum kprobe_insn __kprobes
+thumb16_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ asi->insn_singlestep = thumb16_singlestep;
+ asi->insn_check_cc = thumb_check_cc;
+ return kprobe_decode_insn(insn, asi, kprobe_decode_thumb16_table, true);
+}
+
+enum kprobe_insn __kprobes
+thumb32_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ asi->insn_singlestep = thumb32_singlestep;
+ asi->insn_check_cc = thumb_check_cc;
+ return kprobe_decode_insn(insn, asi, kprobe_decode_thumb32_table, true);
+}
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 1656c87501c0..129c1163248b 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -28,14 +28,16 @@
#include <asm/traps.h>
#include <asm/cacheflush.h>
+#include "kprobes.h"
+
#define MIN_STACK_SIZE(addr) \
min((unsigned long)MAX_STACK_SIZE, \
(unsigned long)current_thread_info() + THREAD_START_SP - (addr))
-#define flush_insns(addr, cnt) \
+#define flush_insns(addr, size) \
flush_icache_range((unsigned long)(addr), \
(unsigned long)(addr) + \
- sizeof(kprobe_opcode_t) * (cnt))
+ (size))
/* Used as a marker in ARM_pc to note when we're in a jprobe. */
#define JPROBE_MAGIC_ADDR 0xffffffff
@@ -49,16 +51,35 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
kprobe_opcode_t insn;
kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
unsigned long addr = (unsigned long)p->addr;
+ bool thumb;
+ kprobe_decode_insn_t *decode_insn;
int is;
- if (addr & 0x3 || in_exception_text(addr))
+ if (in_exception_text(addr))
return -EINVAL;
+#ifdef CONFIG_THUMB2_KERNEL
+ thumb = true;
+ addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
+ insn = ((u16 *)addr)[0];
+ if (is_wide_instruction(insn)) {
+ insn <<= 16;
+ insn |= ((u16 *)addr)[1];
+ decode_insn = thumb32_kprobe_decode_insn;
+ } else
+ decode_insn = thumb16_kprobe_decode_insn;
+#else /* !CONFIG_THUMB2_KERNEL */
+ thumb = false;
+ if (addr & 0x3)
+ return -EINVAL;
insn = *p->addr;
+ decode_insn = arm_kprobe_decode_insn;
+#endif
+
p->opcode = insn;
p->ainsn.insn = tmp_insn;
- switch (arm_kprobe_decode_insn(insn, &p->ainsn)) {
+ switch ((*decode_insn)(insn, &p->ainsn)) {
case INSN_REJECTED: /* not supported */
return -EINVAL;
@@ -68,7 +89,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return -ENOMEM;
for (is = 0; is < MAX_INSN_SIZE; ++is)
p->ainsn.insn[is] = tmp_insn[is];
- flush_insns(p->ainsn.insn, MAX_INSN_SIZE);
+ flush_insns(p->ainsn.insn,
+ sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
+ p->ainsn.insn_fn = (kprobe_insn_fn_t *)
+ ((uintptr_t)p->ainsn.insn | thumb);
break;
case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */
@@ -79,24 +103,88 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
+#ifdef CONFIG_THUMB2_KERNEL
+
+/*
+ * For a 32-bit Thumb breakpoint spanning two memory words we need to take
+ * special precautions to insert the breakpoint atomically, especially on SMP
+ * systems. This is achieved by calling this arming function using stop_machine.
+ */
+static int __kprobes set_t32_breakpoint(void *addr)
+{
+ ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
+ ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
+ flush_insns(addr, 2*sizeof(u16));
+ return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */
+
+ if (!is_wide_instruction(p->opcode)) {
+ *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
+ flush_insns(addr, sizeof(u16));
+ } else if (addr & 2) {
+ /* A 32-bit instruction spanning two words needs special care */
+ stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
+ } else {
+ /* Word aligned 32-bit instruction can be written atomically */
+ u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
+#ifndef __ARMEB__ /* Swap halfwords for little-endian */
+ bkp = (bkp >> 16) | (bkp << 16);
+#endif
+ *(u32 *)addr = bkp;
+ flush_insns(addr, sizeof(u32));
+ }
+}
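
The halfword swap above exists because a 32-bit Thumb instruction is stored as two consecutive halfwords with the first (most significant, as composed here) halfword at the lower address; a single little-endian 32-bit store would otherwise lay the halves out the wrong way round. A host-side sketch of the intended layout, assuming a little-endian host:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint16_t mem[2];
		uint32_t bkp = 0xf7f0a018;	/* KPROBE_THUMB32_BREAKPOINT_INSTRUCTION */

		bkp = (bkp >> 16) | (bkp << 16);	/* swap halfwords */
		memcpy(mem, &bkp, sizeof(bkp));

		/* On a little-endian host the lower-addressed halfword is 0xf7f0. */
		assert(mem[0] == 0xf7f0 && mem[1] == 0xa018);
		return 0;
	}
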
+
+#else /* !CONFIG_THUMB2_KERNEL */
+
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
- *p->addr = KPROBE_BREAKPOINT_INSTRUCTION;
- flush_insns(p->addr, 1);
+ kprobe_opcode_t insn = p->opcode;
+ kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
+ if (insn >= 0xe0000000)
+ brkp |= 0xe0000000; /* Unconditional instruction */
+ else
+ brkp |= insn & 0xf0000000; /* Copy condition from insn */
+ *p->addr = brkp;
+ flush_insns(p->addr, sizeof(p->addr[0]));
}
+#endif /* !CONFIG_THUMB2_KERNEL */
+
/*
* The actual disarming is done here on each CPU and synchronized using
* stop_machine. This synchronization is necessary on SMP to avoid removing
* a probe between the moment the 'Undefined Instruction' exception is raised
* and the moment the exception handler reads the faulting instruction from
- * memory.
+ * memory. It is also needed to atomically set the two half-words of a 32-bit
+ * Thumb breakpoint.
*/
int __kprobes __arch_disarm_kprobe(void *p)
{
struct kprobe *kp = p;
+#ifdef CONFIG_THUMB2_KERNEL
+ u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
+ kprobe_opcode_t insn = kp->opcode;
+ unsigned int len;
+
+ if (is_wide_instruction(insn)) {
+ ((u16 *)addr)[0] = insn>>16;
+ ((u16 *)addr)[1] = insn;
+ len = 2*sizeof(u16);
+ } else {
+ ((u16 *)addr)[0] = insn;
+ len = sizeof(u16);
+ }
+ flush_insns(addr, len);
+
+#else /* !CONFIG_THUMB2_KERNEL */
*kp->addr = kp->opcode;
- flush_insns(kp->addr, 1);
+ flush_insns(kp->addr, sizeof(kp->addr[0]));
+#endif
return 0;
}
@@ -130,12 +218,24 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
__get_cpu_var(current_kprobe) = p;
}
-static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
+static void __kprobes
+singlestep_skip(struct kprobe *p, struct pt_regs *regs)
{
+#ifdef CONFIG_THUMB2_KERNEL
+ regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+ if (is_wide_instruction(p->opcode))
+ regs->ARM_pc += 4;
+ else
+ regs->ARM_pc += 2;
+#else
regs->ARM_pc += 4;
- if (p->ainsn.insn_check_cc(regs->ARM_cpsr))
- p->ainsn.insn_handler(p, regs);
+#endif
+}
+
+static inline void __kprobes
+singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+{
+ p->ainsn.insn_singlestep(p, regs);
}
/*
@@ -149,11 +249,23 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur;
struct kprobe_ctlblk *kcb;
- kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->ARM_pc;
kcb = get_kprobe_ctlblk();
cur = kprobe_running();
- p = get_kprobe(addr);
+
+#ifdef CONFIG_THUMB2_KERNEL
+ /*
+ * First look for a probe which was registered using an address with
+ * bit 0 set; this is the usual situation for pointers to Thumb code.
+ * If not found, fall back to looking for one with bit 0 clear.
+ */
+ p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
+ if (!p)
+ p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
+
+#else /* ! CONFIG_THUMB2_KERNEL */
+ p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
+#endif
if (p) {
if (cur) {
@@ -173,7 +285,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
/* impossible cases */
BUG();
}
- } else {
+ } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+ /* Probe hit and conditional execution check ok. */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -193,6 +306,13 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
}
reset_current_kprobe();
}
+ } else {
+ /*
+ * Probe hit but conditional execution check failed,
+ * so just skip the instruction and continue as if
+ * nothing had happened.
+ */
+ singlestep_skip(p, regs);
}
} else if (cur) {
/* We probably hit a jprobe. Call its break handler. */
@@ -300,7 +420,11 @@ void __naked __kprobes kretprobe_trampoline(void)
"bl trampoline_handler \n\t"
"mov lr, r0 \n\t"
"ldmia sp!, {r0 - r11} \n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+ "bx lr \n\t"
+#else
"mov pc, lr \n\t"
+#endif
: : : "memory");
}
@@ -378,11 +502,22 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
long sp_addr = regs->ARM_sp;
+ long cpsr;
kcb->jprobe_saved_regs = *regs;
memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
regs->ARM_pc = (long)jp->entry;
- regs->ARM_cpsr |= PSR_I_BIT;
+
+ cpsr = regs->ARM_cpsr | PSR_I_BIT;
+#ifdef CONFIG_THUMB2_KERNEL
+ /* Set correct Thumb state in cpsr */
+ if (regs->ARM_pc & 1)
+ cpsr |= PSR_T_BIT;
+ else
+ cpsr &= ~PSR_T_BIT;
+#endif
+ regs->ARM_cpsr = cpsr;
+
preempt_disable();
return 1;
}
@@ -404,7 +539,12 @@ void __kprobes jprobe_return(void)
* This is to prevent any simulated instruction from writing
* over the regs when they are accessing the stack.
*/
+#ifdef CONFIG_THUMB2_KERNEL
+ "sub r0, %0, %1 \n\t"
+ "mov sp, r0 \n\t"
+#else
"sub sp, %0, %1 \n\t"
+#endif
"ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
"str %0, [sp, %2] \n\t"
"str r0, [sp, %3] \n\t"
@@ -415,15 +555,28 @@ void __kprobes jprobe_return(void)
* Return to the context saved by setjmp_pre_handler
* and restored by longjmp_break_handler.
*/
+#ifdef CONFIG_THUMB2_KERNEL
+ "ldr lr, [sp, %2] \n\t" /* lr = saved sp */
+ "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */
+ "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */
+ "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */
+ /* rfe context */
+ "ldmia sp, {r0 - r12} \n\t"
+ "mov sp, lr \n\t"
+ "ldr lr, [sp], #4 \n\t"
+ "rfeia sp! \n\t"
+#else
"ldr r0, [sp, %4] \n\t"
"msr cpsr_cxsf, r0 \n\t"
"ldmia sp, {r0 - pc} \n\t"
+#endif
:
: "r" (kcb->jprobe_saved_regs.ARM_sp),
"I" (sizeof(struct pt_regs) * 2),
"J" (offsetof(struct pt_regs, ARM_sp)),
"J" (offsetof(struct pt_regs, ARM_pc)),
- "J" (offsetof(struct pt_regs, ARM_cpsr))
+ "J" (offsetof(struct pt_regs, ARM_cpsr)),
+ "J" (offsetof(struct pt_regs, ARM_lr))
: "memory", "cc");
}
@@ -460,17 +613,44 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
return 0;
}
-static struct undef_hook kprobes_break_hook = {
+#ifdef CONFIG_THUMB2_KERNEL
+
+static struct undef_hook kprobes_thumb16_break_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
+ .fn = kprobe_trap_handler,
+};
+
+static struct undef_hook kprobes_thumb32_break_hook = {
.instr_mask = 0xffffffff,
- .instr_val = KPROBE_BREAKPOINT_INSTRUCTION,
+ .instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
.cpsr_mask = MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = kprobe_trap_handler,
};
+#else /* !CONFIG_THUMB2_KERNEL */
+
+static struct undef_hook kprobes_arm_break_hook = {
+ .instr_mask = 0x0fffffff,
+ .instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
+ .fn = kprobe_trap_handler,
+};
+
+#endif /* !CONFIG_THUMB2_KERNEL */
+
int __init arch_init_kprobes()
{
arm_kprobe_decode_init();
- register_undef_hook(&kprobes_break_hook);
+#ifdef CONFIG_THUMB2_KERNEL
+ register_undef_hook(&kprobes_thumb16_break_hook);
+ register_undef_hook(&kprobes_thumb32_break_hook);
+#else
+ register_undef_hook(&kprobes_arm_break_hook);
+#endif
return 0;
}
diff --git a/arch/arm/kernel/kprobes.h b/arch/arm/kernel/kprobes.h
new file mode 100644
index 000000000000..a6aeda0a6c7f
--- /dev/null
+++ b/arch/arm/kernel/kprobes.h
@@ -0,0 +1,420 @@
+/*
+ * arch/arm/kernel/kprobes.h
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * Some contents moved here from arch/arm/include/asm/kprobes.h which is
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_H
+#define _ARM_KERNEL_KPROBES_H
+
+/*
+ * These undefined instructions must be unique and
+ * reserved solely for kprobes' use.
+ */
+#define KPROBE_ARM_BREAKPOINT_INSTRUCTION 0x07f001f8
+#define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION 0xde18
+#define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION 0xf7f0a018
+
+
+enum kprobe_insn {
+ INSN_REJECTED,
+ INSN_GOOD,
+ INSN_GOOD_NO_SLOT
+};
+
+typedef enum kprobe_insn (kprobe_decode_insn_t)(kprobe_opcode_t,
+ struct arch_specific_insn *);
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+enum kprobe_insn thumb16_kprobe_decode_insn(kprobe_opcode_t,
+ struct arch_specific_insn *);
+enum kprobe_insn thumb32_kprobe_decode_insn(kprobe_opcode_t,
+ struct arch_specific_insn *);
+
+#else /* !CONFIG_THUMB2_KERNEL */
+
+enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
+ struct arch_specific_insn *);
+#endif
+
+void __init arm_kprobe_decode_init(void);
+
+extern kprobe_check_cc * const kprobe_condition_checks[16];
+
+
+#if __LINUX_ARM_ARCH__ >= 7
+
+/* str_pc_offset is architecturally defined from ARMv7 onwards */
+#define str_pc_offset 8
+#define find_str_pc_offset()
+
+#else /* __LINUX_ARM_ARCH__ < 7 */
+
+/* We need a run-time check to determine str_pc_offset */
+extern int str_pc_offset;
+void __init find_str_pc_offset(void);
+
+#endif
+
+
+/*
+ * Update ITSTATE after normal execution of an IT block instruction.
+ *
+ * The 8 IT state bits are split into two parts in CPSR:
+ * ITSTATE<1:0> are in CPSR<26:25>
+ * ITSTATE<7:2> are in CPSR<15:10>
+ */
+static inline unsigned long it_advance(unsigned long cpsr)
+ {
+ if ((cpsr & 0x06000400) == 0) {
+ /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
+ cpsr &= ~PSR_IT_MASK;
+ } else {
+ /* We need to shift left ITSTATE<4:0> */
+ const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
+ unsigned long it = cpsr & mask;
+ it <<= 1;
+ it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
+ it &= mask;
+ cpsr &= ~mask;
+ cpsr |= it;
+ }
+ return cpsr;
+}
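
A self-contained harness for sanity-checking the bit layout described above; pack_it(), unpack_it() and advance() are sketch-local helpers that mirror it_advance() on a plain value. With the mid-block state 0b00000110 one advance yields 0b00001100 (ITSTATE<4:0> shifted left), and a state with ITSTATE<2:0> == 0 is cleared.

	#include <assert.h>

	/* ITSTATE<1:0> live in CPSR<26:25>, ITSTATE<7:2> in CPSR<15:10>. */
	#define PSR_IT_MASK	0x0600fc00

	static unsigned long pack_it(unsigned it)
	{
		return ((unsigned long)(it & 0x03) << 25) |
		       ((unsigned long)(it & 0xfc) << 8);
	}

	static unsigned unpack_it(unsigned long cpsr)
	{
		return ((cpsr >> 25) & 0x03) | ((cpsr >> 8) & 0xfc);
	}

	static unsigned long advance(unsigned long cpsr)	/* mirrors it_advance() */
	{
		if ((cpsr & 0x06000400) == 0) {
			cpsr &= ~PSR_IT_MASK;
		} else {
			const unsigned long mask = 0x06001c00;	/* ITSTATE<4:0> */
			unsigned long it = cpsr & mask;
			it <<= 1;
			it |= it >> (27 - 10);
			it &= mask;
			cpsr = (cpsr & ~mask) | it;
		}
		return cpsr;
	}

	int main(void)
	{
		/* Mid-block state: 0b00000110 advances to 0b00001100 */
		assert(unpack_it(advance(pack_it(0x06))) == 0x0c);
		/* ITSTATE<2:0> == 0: last instruction, state is cleared */
		assert(unpack_it(advance(pack_it(0x08))) == 0x00);
		return 0;
	}
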
+
+static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
+{
+ long cpsr = regs->ARM_cpsr;
+ if (pcv & 0x1) {
+ cpsr |= PSR_T_BIT;
+ pcv &= ~0x1;
+ } else {
+ cpsr &= ~PSR_T_BIT;
+ pcv &= ~0x2; /* Avoid UNPREDICTABLE address alignment */
+ }
+ regs->ARM_cpsr = cpsr;
+ regs->ARM_pc = pcv;
+}
+
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+/* Kernels built for >= ARMv6 should never run on <= ARMv5 hardware, so... */
+#define load_write_pc_interworks true
+#define test_load_write_pc_interworking()
+
+#else /* __LINUX_ARM_ARCH__ < 6 */
+
+/* We need run-time testing to determine if load_write_pc() should interwork. */
+extern bool load_write_pc_interworks;
+void __init test_load_write_pc_interworking(void);
+
+#endif
+
+static inline void __kprobes load_write_pc(long pcv, struct pt_regs *regs)
+{
+ if (load_write_pc_interworks)
+ bx_write_pc(pcv, regs);
+ else
+ regs->ARM_pc = pcv;
+}
+
+
+#if __LINUX_ARM_ARCH__ >= 7
+
+#define alu_write_pc_interworks true
+#define test_alu_write_pc_interworking()
+
+#elif __LINUX_ARM_ARCH__ <= 5
+
+/* Kernels built for <= ARMv5 should never run on >= ARMv6 hardware, so... */
+#define alu_write_pc_interworks false
+#define test_alu_write_pc_interworking()
+
+#else /* __LINUX_ARM_ARCH__ == 6 */
+
+/* We could be an ARMv6 binary on ARMv7 hardware so we need a run-time check. */
+extern bool alu_write_pc_interworks;
+void __init test_alu_write_pc_interworking(void);
+
+#endif /* __LINUX_ARM_ARCH__ == 6 */
+
+static inline void __kprobes alu_write_pc(long pcv, struct pt_regs *regs)
+{
+ if (alu_write_pc_interworks)
+ bx_write_pc(pcv, regs);
+ else
+ regs->ARM_pc = pcv;
+}
+
+
+void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs);
+void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs);
+
+enum kprobe_insn __kprobes
+kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi);
+
+/*
+ * Test if load/store instructions write back the address register, i.e.
+ * if P (bit 24) == 0 or W (bit 21) == 1
+ */
+#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)
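
A quick host-side check of the macro, using three ARM LDR (immediate) encodings assumed from the ARM ARM; only the plain offset form (P=1, W=0) leaves the base register untouched.

	#include <assert.h>

	#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)

	int main(void)
	{
		assert(!is_writeback(0xe5910004));	/* LDR r0, [r1, #4]   P=1 W=0 */
		assert( is_writeback(0xe5b10004));	/* LDR r0, [r1, #4]!  P=1 W=1 */
		assert( is_writeback(0xe4910004));	/* LDR r0, [r1], #4   P=0 W=0 */
		return 0;
	}
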
+
+/*
+ * The following definitions and macros are used to build instruction
+ * decoding tables for use by kprobe_decode_insn.
+ *
+ * These tables are a concatenation of entries, each of which consists of one of
+ * the decode_* structs. All of the fields in every type of decode structure
+ * are of the union type decode_item, therefore the entire decode table can be
+ * viewed as an array of these and declared like:
+ *
+ * static const union decode_item table_name[] = {};
+ *
+ * In order to construct each entry in the table, macros are used to
+ * initialise a number of sequential decode_item values in a layout which
+ * matches the relevant struct. E.g. DECODE_SIMULATE initialises a struct
+ * decode_simulate by initialising four decode_item objects like this...
+ *
+ * {.bits = _type},
+ * {.bits = _mask},
+ * {.bits = _value},
+ * {.handler = _handler},
+ *
+ * Initialising a specified member of the union means that the compiler
+ * will produce a warning if the argument is of an incorrect type.
+ *
+ * Below is a list of each of the macros used to initialise entries and a
+ * description of the action performed when that entry is matched to an
+ * instruction. A match is found when (instruction & mask) == value.
+ *
+ * DECODE_TABLE(mask, value, table)
+ * Instruction decoding jumps to parsing the new sub-table 'table'.
+ *
+ * DECODE_CUSTOM(mask, value, decoder)
+ * The custom function 'decoder' is called to complete the decoding
+ * of an instruction.
+ *
+ * DECODE_SIMULATE(mask, value, handler)
+ * Set the probe's instruction handler to 'handler'; this will be used
+ * to simulate the instruction when the probe is hit. Decoding returns
+ * with INSN_GOOD_NO_SLOT.
+ *
+ * DECODE_EMULATE(mask, value, handler)
+ * Set the probe's instruction handler to 'handler'; this will be used
+ * to emulate the instruction when the probe is hit. The modified
+ * instruction (see below) is placed in the probes instruction slot so it
+ * may be called by the emulation code. Decoding returns with INSN_GOOD.
+ *
+ * DECODE_REJECT(mask, value)
+ * Instruction decoding fails with INSN_REJECTED
+ *
+ * DECODE_OR(mask, value)
+ * This allows the mask/value test of multiple table entries to be
+ * logically ORed. Once an 'or' entry is matched the decoding action to
+ * be performed is that of the next entry which isn't an 'or'. E.g.
+ *
+ * DECODE_OR (mask1, value1)
+ * DECODE_OR (mask2, value2)
+ * DECODE_SIMULATE (mask3, value3, simulation_handler)
+ *
+ * This means that if any of the three mask/value pairs match the
+ * instruction being decoded, then 'simulation_handler' will be used
+ * for it.
+ *
+ * Both the SIMULATE and EMULATE macros have a second form which takes an
+ * additional 'regs' argument.
+ *
+ * DECODE_SIMULATEX(mask, value, handler, regs)
+ * DECODE_EMULATEX (mask, value, handler, regs)
+ *
+ * These are used to specify what kind of CPU register is encoded in each of the
+ * least significant 5 nibbles of the instruction being decoded. The regs value
+ * is specified using the REGS macro, this takes any of the REG_TYPE_* values
+ * from enum decode_reg_type as arguments; only the '*' part of the name is
+ * given. E.g.
+ *
+ * REGS(0, ANY, NOPC, 0, ANY)
+ *
+ * This indicates an instruction is encoded like:
+ *
+ * bits 19..16 ignore
+ * bits 15..12 any register allowed here
+ * bits 11.. 8 any register except PC allowed here
+ * bits 7.. 4 ignore
+ * bits 3.. 0 any register allowed here
+ *
+ * This register specification is checked after a decode table entry is found to
+ * match an instruction (through the mask/value test). Any invalid register then
+ * found in the instruction will cause decoding to fail with INSN_REJECTED. In
+ * the above example this would happen if bits 11..8 of the instruction were
+ * 1111, indicating R15 or PC.
+ *
+ * As well as checking for legal combinations of registers, this data is also
+ * used to modify the registers encoded in the instructions so that the
+ * emulation routines can use it. (See decode_regs() and INSN_NEW_BITS.)
+ *
+ * Here is a real example which matches ARM instructions of the form
+ * "AND <Rd>,<Rn>,<Rm>,<shift> <Rs>"
+ *
+ * DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
+ * REGS(ANY, ANY, NOPC, 0, ANY)),
+ * ^ ^ ^ ^
+ * Rn Rd Rs Rm
+ *
+ * Decoding the instruction "AND R4, R5, R6, ASL R15" will be rejected because
+ * Rs == R15
+ *
+ * Decoding the instruction "AND R4, R5, R6, ASL R7" will be accepted and the
+ * instruction will be modified to "AND R0, R2, R3, ASL R1" and then placed into
+ * the kprobes instruction slot. This can then be called later by the handler
+ * function emulate_rd12rn16rm0rs8_rwflags in order to simulate the instruction.
+ */
+
+enum decode_type {
+ DECODE_TYPE_END,
+ DECODE_TYPE_TABLE,
+ DECODE_TYPE_CUSTOM,
+ DECODE_TYPE_SIMULATE,
+ DECODE_TYPE_EMULATE,
+ DECODE_TYPE_OR,
+ DECODE_TYPE_REJECT,
+ NUM_DECODE_TYPES /* Must be last enum */
+};
+
+#define DECODE_TYPE_BITS 4
+#define DECODE_TYPE_MASK ((1 << DECODE_TYPE_BITS) - 1)
+
+enum decode_reg_type {
+ REG_TYPE_NONE = 0, /* Not a register, ignore */
+ REG_TYPE_ANY, /* Any register allowed */
+ REG_TYPE_SAMEAS16, /* Register should be same as that at bits 19..16 */
+ REG_TYPE_SP, /* Register must be SP */
+ REG_TYPE_PC, /* Register must be PC */
+ REG_TYPE_NOSP, /* Register must not be SP */
+ REG_TYPE_NOSPPC, /* Register must not be SP or PC */
+ REG_TYPE_NOPC, /* Register must not be PC */
+ REG_TYPE_NOPCWB, /* No PC if load/store write-back flag also set */
+
+ /* The following types are used when the encoding for PC indicates
+ * another instruction form. This distinction only matters for test
+ * case coverage checks.
+ */
+ REG_TYPE_NOPCX, /* Register must not be PC */
+ REG_TYPE_NOSPPCX, /* Register must not be SP or PC */
+
+ /* Alias to allow '0' arg to be used in REGS macro. */
+ REG_TYPE_0 = REG_TYPE_NONE
+};
+
+#define REGS(r16, r12, r8, r4, r0) \
+ ((REG_TYPE_##r16) << 16) + \
+ ((REG_TYPE_##r12) << 12) + \
+ ((REG_TYPE_##r8) << 8) + \
+ ((REG_TYPE_##r4) << 4) + \
+ (REG_TYPE_##r0)
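
A packing sketch for the REGS() helper, assuming the enum ordering above (REG_TYPE_ANY == 1, REG_TYPE_NOPC == 7): REGS(0, ANY, NOPC, 0, ANY) encodes one register class per operand nibble position and packs to 0x1701.

	#include <assert.h>

	enum { REG_TYPE_NONE, REG_TYPE_ANY, REG_TYPE_SAMEAS16, REG_TYPE_SP,
	       REG_TYPE_PC, REG_TYPE_NOSP, REG_TYPE_NOSPPC, REG_TYPE_NOPC };

	int main(void)
	{
		unsigned regs = (REG_TYPE_NONE << 16) + (REG_TYPE_ANY << 12) +
				(REG_TYPE_NOPC << 8) + (REG_TYPE_NONE << 4) +
				 REG_TYPE_ANY;

		/* bits 19..16 ignored, 15..12 ANY, 11..8 NOPC, 7..4 ignored, 3..0 ANY */
		assert(regs == 0x1701);
		return 0;
	}
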
+
+union decode_item {
+ u32 bits;
+ const union decode_item *table;
+ kprobe_insn_handler_t *handler;
+ kprobe_decode_insn_t *decoder;
+};
+
+
+#define DECODE_END \
+ {.bits = DECODE_TYPE_END}
+
+
+struct decode_header {
+ union decode_item type_regs;
+ union decode_item mask;
+ union decode_item value;
+};
+
+#define DECODE_HEADER(_type, _mask, _value, _regs) \
+ {.bits = (_type) | ((_regs) << DECODE_TYPE_BITS)}, \
+ {.bits = (_mask)}, \
+ {.bits = (_value)}
+
+
+struct decode_table {
+ struct decode_header header;
+ union decode_item table;
+};
+
+#define DECODE_TABLE(_mask, _value, _table) \
+ DECODE_HEADER(DECODE_TYPE_TABLE, _mask, _value, 0), \
+ {.table = (_table)}
+
+
+struct decode_custom {
+ struct decode_header header;
+ union decode_item decoder;
+};
+
+#define DECODE_CUSTOM(_mask, _value, _decoder) \
+ DECODE_HEADER(DECODE_TYPE_CUSTOM, _mask, _value, 0), \
+ {.decoder = (_decoder)}
+
+
+struct decode_simulate {
+ struct decode_header header;
+ union decode_item handler;
+};
+
+#define DECODE_SIMULATEX(_mask, _value, _handler, _regs) \
+ DECODE_HEADER(DECODE_TYPE_SIMULATE, _mask, _value, _regs), \
+ {.handler = (_handler)}
+
+#define DECODE_SIMULATE(_mask, _value, _handler) \
+ DECODE_SIMULATEX(_mask, _value, _handler, 0)
+
+
+struct decode_emulate {
+ struct decode_header header;
+ union decode_item handler;
+};
+
+#define DECODE_EMULATEX(_mask, _value, _handler, _regs) \
+ DECODE_HEADER(DECODE_TYPE_EMULATE, _mask, _value, _regs), \
+ {.handler = (_handler)}
+
+#define DECODE_EMULATE(_mask, _value, _handler) \
+ DECODE_EMULATEX(_mask, _value, _handler, 0)
+
+
+struct decode_or {
+ struct decode_header header;
+};
+
+#define DECODE_OR(_mask, _value) \
+ DECODE_HEADER(DECODE_TYPE_OR, _mask, _value, 0)
+
+
+struct decode_reject {
+ struct decode_header header;
+};
+
+#define DECODE_REJECT(_mask, _value) \
+ DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)
+
+
+int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
+ const union decode_item *table, bool thumb16);
+
+
+#endif /* _ARM_KERNEL_KPROBES_H */
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 016d6a0830a3..05b377616fd5 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -43,25 +43,7 @@ void *module_alloc(unsigned long size)
GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
__builtin_return_address(0));
}
-#else /* CONFIG_MMU */
-void *module_alloc(unsigned long size)
-{
- return size == 0 ? NULL : vmalloc(size);
-}
-#endif /* !CONFIG_MMU */
-
-void module_free(struct module *module, void *region)
-{
- vfree(region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
+#endif
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
@@ -265,15 +247,6 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
return 0;
}
-int
-apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec, struct module *module)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
struct mod_unwind_map {
const Elf_Shdr *unw_sec;
const Elf_Shdr *txt_sec;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 2b5b1421596c..53c9c2610cbc 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -435,7 +435,7 @@ armpmu_reserve_hardware(void)
if (irq >= 0)
free_irq(irq, NULL);
}
- release_pmu(pmu_device);
+ release_pmu(ARM_PMU_DEVICE_CPU);
pmu_device = NULL;
}
@@ -454,7 +454,7 @@ armpmu_release_hardware(void)
}
armpmu->stop();
- release_pmu(pmu_device);
+ release_pmu(ARM_PMU_DEVICE_CPU);
pmu_device = NULL;
}
@@ -662,6 +662,12 @@ init_hw_perf_events(void)
case 0xC090: /* Cortex-A9 */
armpmu = armv7_a9_pmu_init();
break;
+ case 0xC050: /* Cortex-A5 */
+ armpmu = armv7_a5_pmu_init();
+ break;
+ case 0xC0F0: /* Cortex-A15 */
+ armpmu = armv7_a15_pmu_init();
+ break;
}
/* Intel CPUs [xscale]. */
} else if (0x69 == implementor) {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index e20ca9cafef5..4c851834f68e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,17 +17,23 @@
*/
#ifdef CONFIG_CPU_V7
-/* Common ARMv7 event types */
+/*
+ * Common ARMv7 event types
+ *
+ * Note: An implementation may not be able to count all of these events
+ * but the encodings are considered to be `reserved' in the case that
+ * they are not available.
+ */
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV7_PERFCTR_IFETCH_MISS = 0x01,
ARMV7_PERFCTR_ITLB_MISS = 0x02,
- ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
- ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
+ ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */
+ ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */
ARMV7_PERFCTR_DTLB_REFILL = 0x05,
ARMV7_PERFCTR_DREAD = 0x06,
ARMV7_PERFCTR_DWRITE = 0x07,
-
+ ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV7_PERFCTR_EXC_TAKEN = 0x09,
ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV7_PERFCTR_CID_WRITE = 0x0B,
@@ -39,21 +45,30 @@ enum armv7_perf_types {
*/
ARMV7_PERFCTR_PC_WRITE = 0x0C,
ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
+ ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
+
+ /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
-
- ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
+ ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
+ ARMV7_PERFCTR_MEM_ACCESS = 0x13,
+ ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
+ ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
+ ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16,
+ ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17,
+ ARMV7_PERFCTR_L2_DCACHE_WB = 0x18,
+ ARMV7_PERFCTR_BUS_ACCESS = 0x19,
+ ARMV7_PERFCTR_MEMORY_ERROR = 0x1A,
+ ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
+ ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
+ ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
- ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
-
- ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
-
ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
@@ -138,6 +153,39 @@ enum armv7_a9_perf_types {
ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
};
+/* ARMv7 Cortex-A5 specific event types */
+enum armv7_a5_perf_types {
+ ARMV7_PERFCTR_IRQ_TAKEN = 0x86,
+ ARMV7_PERFCTR_FIQ_TAKEN = 0x87,
+
+ ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
+ ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
+ ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
+ ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
+ ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
+ ARMV7_PERFCTR_READ_ALLOC = 0xc5,
+
+ ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
+};
+
+/* ARMv7 Cortex-A15 specific event types */
+enum armv7_a15_perf_types {
+ ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40,
+ ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41,
+ ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42,
+ ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43,
+
+ ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C,
+ ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D,
+
+ ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50,
+ ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51,
+ ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52,
+ ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53,
+
+ ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76,
+};
+
/*
* Cortex-A8 HW events mapping
*
@@ -207,11 +255,6 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
},
[C(DTLB)] = {
- /*
- * Only ITLB misses and DTLB refills are supported.
- * If users want the DTLB refills misses a raw counter
- * must be used.
- */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
@@ -337,11 +380,6 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
},
[C(DTLB)] = {
- /*
- * Only ITLB misses and DTLB refills are supported.
- * If users want the DTLB refills misses a raw counter
- * must be used.
- */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
@@ -402,6 +440,242 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
};
/*
+ * Cortex-A5 HW events mapping
+ */
+static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_PREFETCH_LINEFILL,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ /*
+ * The prefetch counters don't differentiate between the I
+ * side and the D side.
+ */
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_PREFETCH_LINEFILL,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
+ * Cortex-A15 HW events mapping
+ */
+static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+};
+
+static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ /*
+ * Not all performance counters differentiate between read
+ * and write accesses/misses so we're not always strictly
+ * correct, but it's the best we can do. Writes and reads get
+ * combined in these cases.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)]
+ = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)]
+ = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
* Perf Events counters
*/
enum armv7_counters {
@@ -933,6 +1207,26 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
+
+static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+{
+ armv7pmu.id = ARM_PERF_PMU_ID_CA5;
+ armv7pmu.name = "ARMv7 Cortex-A5";
+ armv7pmu.cache_map = &armv7_a5_perf_cache_map;
+ armv7pmu.event_map = &armv7_a5_perf_map;
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
+ return &armv7pmu;
+}
+
+static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+{
+ armv7pmu.id = ARM_PERF_PMU_ID_CA15;
+ armv7pmu.name = "ARMv7 Cortex-A15";
+ armv7pmu.cache_map = &armv7_a15_perf_cache_map;
+ armv7pmu.event_map = &armv7_a15_perf_map;
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
+ return &armv7pmu;
+}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
@@ -943,4 +1237,14 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
+
+static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+{
+ return NULL;
+}
+
+static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+{
+ return NULL;
+}
#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c79eec19262..2b70709376c3 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>
@@ -25,36 +26,88 @@ static volatile long pmu_lock;
static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
-static int __devinit pmu_device_probe(struct platform_device *pdev)
+static int __devinit pmu_register(struct platform_device *pdev,
+ enum arm_pmu_type type)
{
-
- if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
+ if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
pr_warning("received registration request for unknown "
- "device %d\n", pdev->id);
+ "device %d\n", type);
return -EINVAL;
}
- if (pmu_devices[pdev->id])
- pr_warning("registering new PMU device type %d overwrites "
- "previous registration!\n", pdev->id);
- else
- pr_info("registered new PMU device of type %d\n",
- pdev->id);
+ if (pmu_devices[type]) {
+ pr_warning("rejecting duplicate registration of PMU device "
+ "type %d.", type);
+ return -ENOSPC;
+ }
- pmu_devices[pdev->id] = pdev;
+ pr_info("registered new PMU device of type %d\n", type);
+ pmu_devices[type] = pdev;
return 0;
}
-static struct platform_driver pmu_driver = {
+#define OF_MATCH_PMU(_name, _type) { \
+ .compatible = _name, \
+ .data = (void *)_type, \
+}
+
+#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
+
+static struct of_device_id armpmu_of_device_ids[] = {
+ OF_MATCH_CPU("arm,cortex-a9-pmu"),
+ OF_MATCH_CPU("arm,cortex-a8-pmu"),
+ OF_MATCH_CPU("arm,arm1136-pmu"),
+ OF_MATCH_CPU("arm,arm1176-pmu"),
+ {},
+};
+
+#define PLAT_MATCH_PMU(_name, _type) { \
+ .name = _name, \
+ .driver_data = _type, \
+}
+
+#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+ PLAT_MATCH_CPU("arm-pmu"),
+ {},
+};
+
+enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ const struct platform_device_id *pdev_id;
+
+ /* provided by of_device_id table */
+ if (pdev->dev.of_node) {
+ of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
+ BUG_ON(!of_id);
+ return (enum arm_pmu_type)of_id->data;
+ }
+
+ /* Provided by platform_device_id table */
+ pdev_id = platform_get_device_id(pdev);
+ BUG_ON(!pdev_id);
+ return pdev_id->driver_data;
+}
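
For boards without device tree support, the platform_device_id table above lets board code register the CPU PMU with a plain platform device named "arm-pmu". A hedged sketch under assumed names: board_init_pmu, pmu_resource and the interrupt number 92 are placeholders, not part of this patch.

	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static struct resource pmu_resource = {
		.start	= 92,			/* placeholder PMU interrupt number */
		.end	= 92,
		.flags	= IORESOURCE_IRQ,
	};

	static struct platform_device pmu_device = {
		.name		= "arm-pmu",	/* matched via armpmu_plat_device_ids */
		.id		= -1,		/* type now comes from the id table */
		.resource	= &pmu_resource,
		.num_resources	= 1,
	};

	static int __init board_init_pmu(void)
	{
		return platform_device_register(&pmu_device);
	}
	device_initcall(board_init_pmu);
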
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+ return pmu_register(pdev, armpmu_device_type(pdev));
+}
+
+static struct platform_driver armpmu_driver = {
.driver = {
.name = "arm-pmu",
+ .of_match_table = armpmu_of_device_ids,
},
- .probe = pmu_device_probe,
+ .probe = armpmu_device_probe,
+ .id_table = armpmu_plat_device_ids,
};
static int __init register_pmu_driver(void)
{
- return platform_driver_register(&pmu_driver);
+ return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);
@@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
EXPORT_SYMBOL_GPL(reserve_pmu);
int
-release_pmu(struct platform_device *pdev)
+release_pmu(enum arm_pmu_type device)
{
- if (WARN_ON(pdev != pmu_devices[pdev->id]))
+ if (WARN_ON(!pmu_devices[device]))
return -EINVAL;
- clear_bit_unlock(pdev->id, &pmu_lock);
+ clear_bit_unlock(device, &pmu_lock);
return 0;
}
EXPORT_SYMBOL_GPL(release_pmu);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 5c199610719f..2491f3b406bc 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -228,34 +228,12 @@ static struct undef_hook thumb_break_hook = {
.fn = break_trap,
};
-static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
-{
- unsigned int instr2;
- void __user *pc;
-
- /* Check the second half of the instruction. */
- pc = (void __user *)(instruction_pointer(regs) + 2);
-
- if (processor_mode(regs) == SVC_MODE) {
- instr2 = *(u16 *) pc;
- } else {
- get_user(instr2, (u16 __user *)pc);
- }
-
- if (instr2 == 0xa000) {
- ptrace_break(current, regs);
- return 0;
- } else {
- return 1;
- }
-}
-
static struct undef_hook thumb2_break_hook = {
- .instr_mask = 0xffff,
- .instr_val = 0xf7f0,
+ .instr_mask = 0xffffffff,
+ .instr_val = 0xf7f0a000,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
- .fn = thumb2_break_trap,
+ .fn = break_trap,
};
static int __init ptrace_break_init(void)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index acbb447ac6b5..70bca649e925 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -343,54 +343,6 @@ static void __init feat_v6_fixup(void)
elf_hwcap &= ~HWCAP_TLS;
}
-static void __init setup_processor(void)
-{
- struct proc_info_list *list;
-
- /*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc-*.S
- */
- list = lookup_processor_type(read_cpuid_id());
- if (!list) {
- printk("CPU configuration botched (ID %08x), unable "
- "to continue.\n", read_cpuid_id());
- while (1);
- }
-
- cpu_name = list->cpu_name;
-
-#ifdef MULTI_CPU
- processor = *list->proc;
-#endif
-#ifdef MULTI_TLB
- cpu_tlb = *list->tlb;
-#endif
-#ifdef MULTI_USER
- cpu_user = *list->user;
-#endif
-#ifdef MULTI_CACHE
- cpu_cache = *list->cache;
-#endif
-
- printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
- proc_arch[cpu_architecture()], cr_alignment);
-
- sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
- sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
- elf_hwcap = list->elf_hwcap;
-#ifndef CONFIG_ARM_THUMB
- elf_hwcap &= ~HWCAP_THUMB;
-#endif
-
- feat_v6_fixup();
-
- cacheid_init();
- cpu_proc_init();
-}
-
/*
* cpu_init - initialise one CPU.
*
@@ -406,6 +358,8 @@ void cpu_init(void)
BUG();
}
+ cpu_proc_init();
+
/*
* Define the placement constraint for the inline asm directive below.
* In Thumb-2, msr with an immediate value is not allowed.
@@ -442,6 +396,54 @@ void cpu_init(void)
: "r14");
}
+static void __init setup_processor(void)
+{
+ struct proc_info_list *list;
+
+ /*
+ * locate processor in the list of supported processor
+ * types. The linker builds this table for us from the
+ * entries in arch/arm/mm/proc-*.S
+ */
+ list = lookup_processor_type(read_cpuid_id());
+ if (!list) {
+ printk("CPU configuration botched (ID %08x), unable "
+ "to continue.\n", read_cpuid_id());
+ while (1);
+ }
+
+ cpu_name = list->cpu_name;
+
+#ifdef MULTI_CPU
+ processor = *list->proc;
+#endif
+#ifdef MULTI_TLB
+ cpu_tlb = *list->tlb;
+#endif
+#ifdef MULTI_USER
+ cpu_user = *list->user;
+#endif
+#ifdef MULTI_CACHE
+ cpu_cache = *list->cache;
+#endif
+
+ printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+ cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+ proc_arch[cpu_architecture()], cr_alignment);
+
+ sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
+ sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
+ elf_hwcap = list->elf_hwcap;
+#ifndef CONFIG_ARM_THUMB
+ elf_hwcap &= ~HWCAP_THUMB;
+#endif
+
+ feat_v6_fixup();
+
+ cacheid_init();
+ cpu_init();
+}
+
void __init dump_machine_table(void)
{
struct machine_desc *p;
@@ -915,9 +917,14 @@ void __init setup_arch(char **cmdline_p)
#endif
reserve_crashkernel();
- cpu_init();
tcm_init();
+#ifdef CONFIG_ZONE_DMA
+ if (mdesc->dma_zone_size) {
+ extern unsigned long arm_dma_zone_size;
+ arm_dma_zone_size = mdesc->dma_zone_size;
+ }
+#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
@@ -979,6 +986,10 @@ static const char *hwcap_str[] = {
"neon",
"vfpv3",
"vfpv3d16",
+ "tls",
+ "vfpv4",
+ "idiva",
+ "idivt",
NULL
};
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 6398ead9d1c0..dc902f2c6845 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -10,64 +10,61 @@
/*
* Save CPU state for a suspend
* r1 = v:p offset
- * r3 = virtual return function
- * Note: sp is decremented to allocate space for CPU state on stack
- * r0-r3,r9,r10,lr corrupted
+ * r2 = suspend function arg0
+ * r3 = suspend function
*/
-ENTRY(cpu_suspend)
- mov r9, lr
+ENTRY(__cpu_suspend)
+ stmfd sp!, {r4 - r11, lr}
#ifdef MULTI_CPU
ldr r10, =processor
- mov r2, sp @ current virtual SP
- ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+ ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
- sub sp, sp, r0 @ allocate CPU state on stack
- mov r0, sp @ save pointer
+#else
+ ldr r5, =cpu_suspend_size
+ ldr ip, =cpu_do_resume
+#endif
+ mov r6, sp @ current virtual SP
+ sub sp, sp, r5 @ allocate CPU state on stack
+ mov r0, sp @ save pointer to CPU save block
add ip, ip, r1 @ convert resume fn to phys
- stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn
- ldr r3, =sleep_save_sp
- add r2, sp, r1 @ convert SP to phys
+ stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn
+ ldr r5, =sleep_save_sp
+ add r6, sp, r1 @ convert SP to phys
+ stmfd sp!, {r2, r3} @ save suspend func arg and pointer
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
- str r2, [r3, lr, lsl #2] @ save phys SP
+ str r6, [r5, lr, lsl #2] @ save phys SP
#else
- str r2, [r3] @ save phys SP
+ str r6, [r5] @ save phys SP
#endif
+#ifdef MULTI_CPU
mov lr, pc
ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
- mov r2, sp @ current virtual SP
- ldr r0, =cpu_suspend_size
- sub sp, sp, r0 @ allocate CPU state on stack
- mov r0, sp @ save pointer
- stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn
- ldr r3, =sleep_save_sp
- add r2, sp, r1 @ convert SP to phys
-#ifdef CONFIG_SMP
- ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
- ALT_UP(mov lr, #0)
- and lr, lr, #15
- str r2, [r3, lr, lsl #2] @ save phys SP
-#else
- str r2, [r3] @ save phys SP
-#endif
bl cpu_do_suspend
#endif
@ flush data cache
#ifdef MULTI_CACHE
ldr r10, =cpu_cache
- mov lr, r9
+ mov lr, pc
ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
- mov lr, r9
- b __cpuc_flush_kern_all
+ bl __cpuc_flush_kern_all
#endif
-ENDPROC(cpu_suspend)
+ adr lr, BSYM(cpu_suspend_abort)
+ ldmfd sp!, {r0, pc} @ call suspend fn
+ENDPROC(__cpu_suspend)
.ltorg
+cpu_suspend_abort:
+ ldmia sp!, {r1 - r3} @ pop v:p, virt SP, phys resume fn
+ mov sp, r2
+ ldmfd sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
+
/*
* r0 = control register value
* r1 = v:p offset (preserved by cpu_do_resume)
@@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
str r5, [r2, r4, lsl #2] @ restore old mapping
mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
- mov pc, lr
+ bl cpu_init @ restore the und/abt/irq banked regs
+ mov r0, #0 @ return zero on success
+ ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
/*
@@ -120,20 +119,11 @@ ENTRY(cpu_resume)
ldr r0, sleep_save_sp @ stack phys addr
#endif
setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
-#ifdef MULTI_CPU
- @ load v:p, stack, return fn, resume fn
- ARM( ldmia r0!, {r1, sp, lr, pc} )
-THUMB( ldmia r0!, {r1, r2, r3, r4} )
+ @ load v:p, stack, resume fn
+ ARM( ldmia r0!, {r1, sp, pc} )
+THUMB( ldmia r0!, {r1, r2, r3} )
THUMB( mov sp, r2 )
-THUMB( mov lr, r3 )
-THUMB( bx r4 )
-#else
- @ load v:p, stack, return fn
- ARM( ldmia r0!, {r1, sp, lr} )
-THUMB( ldmia r0!, {r1, r2, lr} )
-THUMB( mov sp, r2 )
- b cpu_do_resume
-#endif
+THUMB( bx r3 )
ENDPROC(cpu_resume)
sleep_save_sp:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e7f92a4321f3..167e3cbe1f2f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -365,8 +365,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
*/
if (max_cpus > ncores)
max_cpus = ncores;
-
- if (max_cpus > 1) {
+ if (ncores > 1 && max_cpus) {
/*
* Enable the local timer or broadcast device for the
* boot CPU, but only if we have more than one CPU.
@@ -374,6 +373,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
percpu_timer_setup();
/*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time. A platform should
+ * re-initialize the map in platform_smp_prepare_cpus() if
+ * present != possible (e.g. physical hotplug).
+ */
+ init_cpu_present(&cpu_possible_map);
+
+ /*
* Initialise the SCU if there are more than one CPU
* and let them know where to start.
*/
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index a1e757c3439b..79ed5e7f204a 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -20,6 +20,7 @@
#define SCU_INVALIDATE 0x0c
#define SCU_FPGA_REVISION 0x10
+#ifdef CONFIG_SMP
/*
* Get the number of CPU cores from the SCU configuration
*/
@@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base)
*/
flush_cache_all();
}
+#endif
/*
* Set the executing CPUs power mode as defined. This will be in
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index f5cf660eefcc..30e302d33e0a 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -19,6 +19,8 @@
#include "tcm.h"
static struct gen_pool *tcm_pool;
+static bool dtcm_present;
+static bool itcm_present;
/* TCM section definitions from the linker */
extern char __itcm_start, __sitcm_text, __eitcm_text;
@@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len)
}
EXPORT_SYMBOL(tcm_free);
+bool tcm_dtcm_present(void)
+{
+ return dtcm_present;
+}
+EXPORT_SYMBOL(tcm_dtcm_present);
+
+bool tcm_itcm_present(void)
+{
+ return itcm_present;
+}
+EXPORT_SYMBOL(tcm_itcm_present);
+
static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
u32 *offset)
{
@@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
(tcm_region & 1) ? "" : "not ");
}
+ /* Not much fun you can do with a size 0 bank */
+ if (tcm_size == 0)
+ return 0;
+
/* Force move the TCM bank to where we want it, enable */
tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;
@@ -165,12 +183,20 @@ void __init tcm_init(void)
u32 tcm_status = read_cpuid_tcmstatus();
u8 dtcm_banks = (tcm_status >> 16) & 0x03;
u8 itcm_banks = (tcm_status & 0x03);
+ size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
+ size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
char *start;
char *end;
char *ram;
int ret;
int i;
+ /* Values greater than 2 for D/ITCM banks are "reserved" */
+ if (dtcm_banks > 2)
+ dtcm_banks = 0;
+ if (itcm_banks > 2)
+ itcm_banks = 0;
+
/* Setup DTCM if present */
if (dtcm_banks > 0) {
for (i = 0; i < dtcm_banks; i++) {
@@ -178,6 +204,13 @@ void __init tcm_init(void)
if (ret)
return;
}
+ /* This means you compiled more code than fits into DTCM */
+ if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
+ pr_info("CPU DTCM: %u bytes of code compiled to "
+ "DTCM but only %lu bytes of DTCM present\n",
+ dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
+ goto no_dtcm;
+ }
dtcm_res.end = dtcm_end - 1;
request_resource(&iomem_resource, &dtcm_res);
dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
@@ -186,12 +219,16 @@ void __init tcm_init(void)
start = &__sdtcm_data;
end = &__edtcm_data;
ram = &__dtcm_start;
- /* This means you compiled more code than fits into DTCM */
- BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET));
- memcpy(start, ram, (end-start));
- pr_debug("CPU DTCM: copied data from %p - %p\n", start, end);
+ memcpy(start, ram, dtcm_code_sz);
+ pr_debug("CPU DTCM: copied data from %p - %p\n",
+ start, end);
+ dtcm_present = true;
+ } else if (dtcm_code_sz) {
+ pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no "
+ "DTCM banks present in CPU\n", dtcm_code_sz);
}
+no_dtcm:
/* Setup ITCM if present */
if (itcm_banks > 0) {
for (i = 0; i < itcm_banks; i++) {
@@ -199,6 +236,13 @@ void __init tcm_init(void)
if (ret)
return;
}
+ /* This means you compiled more code than fits into ITCM */
+ if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
+ pr_info("CPU ITCM: %u bytes of code compiled to "
+ "ITCM but only %lu bytes of ITCM present\n",
+ itcm_code_sz, (itcm_end - ITCM_OFFSET));
+ return;
+ }
itcm_res.end = itcm_end - 1;
request_resource(&iomem_resource, &itcm_res);
itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
@@ -207,10 +251,13 @@ void __init tcm_init(void)
start = &__sitcm_text;
end = &__eitcm_text;
ram = &__itcm_start;
- /* This means you compiled more code than fits into ITCM */
- BUG_ON((end - start) > (itcm_end - ITCM_OFFSET));
- memcpy(start, ram, (end-start));
- pr_debug("CPU ITCM: copied code from %p - %p\n", start, end);
+ memcpy(start, ram, itcm_code_sz);
+ pr_debug("CPU ITCM: copied code from %p - %p\n",
+ start, end);
+ itcm_present = true;
+ } else if (itcm_code_sz) {
+ pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
+ "ITCM banks present in CPU\n", itcm_code_sz);
}
}
@@ -221,7 +268,6 @@ void __init tcm_init(void)
*/
static int __init setup_tcm_pool(void)
{
- u32 tcm_status = read_cpuid_tcmstatus();
u32 dtcm_pool_start = (u32) &__edtcm_data;
u32 itcm_pool_start = (u32) &__eitcm_text;
int ret;
@@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void)
pr_debug("Setting up TCM memory pool\n");
/* Add the rest of DTCM to the TCM pool */
- if (tcm_status & (0x03 << 16)) {
+ if (dtcm_present) {
if (dtcm_pool_start < dtcm_end) {
ret = gen_pool_add(tcm_pool, dtcm_pool_start,
dtcm_end - dtcm_pool_start, -1);
@@ -253,7 +299,7 @@ static int __init setup_tcm_pool(void)
}
/* Add the rest of ITCM to the TCM pool */
- if (tcm_status & 0x03) {
+ if (itcm_present) {
if (itcm_pool_start < itcm_end) {
ret = gen_pool_add(tcm_pool, itcm_pool_start,
itcm_end - itcm_pool_start, -1);
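
With DTCM/ITCM presence now cached in dtcm_present/itcm_present and exposed through tcm_dtcm_present()/tcm_itcm_present(), code that wants to place buffers in TCM can check for the banks at run time instead of relying purely on link-time placement. A minimal sketch, assuming the existing tcm_alloc()/tcm_free() allocator from <asm/tcm.h>; the caller, buffer size and error handling are illustrative only:

#include <linux/errno.h>
#include <asm/tcm.h>

#define SCRATCH_SZ	256		/* hypothetical scratch buffer size */

static void *tcm_scratch;

static int example_tcm_setup(void)
{
	/* Bail out unless the CPU reported at least one usable DTCM bank */
	if (!tcm_dtcm_present())
		return -ENODEV;

	tcm_scratch = tcm_alloc(SCRATCH_SZ);
	if (!tcm_scratch)
		return -ENOMEM;

	return 0;
}

static void example_tcm_teardown(void)
{
	if (tcm_scratch)
		tcm_free(tcm_scratch, SCRATCH_SZ);
}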
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6807cb1e76dd..2d3436e9f71f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -355,9 +355,24 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
pc = (void __user *)instruction_pointer(regs);
if (processor_mode(regs) == SVC_MODE) {
- instr = *(u32 *) pc;
+#ifdef CONFIG_THUMB2_KERNEL
+ if (thumb_mode(regs)) {
+ instr = ((u16 *)pc)[0];
+ if (is_wide_instruction(instr)) {
+ instr <<= 16;
+ instr |= ((u16 *)pc)[1];
+ }
+ } else
+#endif
+ instr = *(u32 *) pc;
} else if (thumb_mode(regs)) {
get_user(instr, (u16 __user *)pc);
+ if (is_wide_instruction(instr)) {
+ unsigned int instr2;
+ get_user(instr2, (u16 __user *)pc+1);
+ instr <<= 16;
+ instr |= instr2;
+ }
} else {
get_user(instr, (u32 __user *)pc);
}
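
Both the SVC-mode and user-mode paths above apply the same Thumb-2 rule: a first halfword at or above 0xe800 selects a 32-bit encoding (this is what is_wide_instruction() tests), and the full instruction is formed with that halfword in the upper 16 bits. A standalone sketch of the combination, outside any kernel API (the helper name and plain-pointer access are illustrative; the kernel code above uses get_user() for user addresses):

#include <stdint.h>

static uint32_t read_thumb_insn(const uint16_t *pc)
{
	uint32_t insn = pc[0];

	/* A first halfword of 0xe800-0xffff means a 32-bit Thumb-2 encoding */
	if (insn >= 0xe800) {
		insn <<= 16;
		insn |= pc[1];
	}
	return insn;
}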
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index e5287f21badc..bf977f8514f6 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -38,57 +38,6 @@ jiffies = jiffies_64 + 4;
SECTIONS
{
-#ifdef CONFIG_XIP_KERNEL
- . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
-#else
- . = PAGE_OFFSET + TEXT_OFFSET;
-#endif
-
- .init : { /* Init code and data */
- _stext = .;
- _sinittext = .;
- HEAD_TEXT
- INIT_TEXT
- ARM_EXIT_KEEP(EXIT_TEXT)
- _einittext = .;
- ARM_CPU_DISCARD(PROC_INFO)
- __arch_info_begin = .;
- *(.arch.info.init)
- __arch_info_end = .;
- __tagtable_begin = .;
- *(.taglist.init)
- __tagtable_end = .;
-#ifdef CONFIG_SMP_ON_UP
- __smpalt_begin = .;
- *(.alt.smp.init)
- __smpalt_end = .;
-#endif
-
- __pv_table_begin = .;
- *(.pv_table)
- __pv_table_end = .;
-
- INIT_SETUP(16)
-
- INIT_CALLS
- CON_INITCALL
- SECURITY_INITCALL
- INIT_RAM_FS
-
-#ifndef CONFIG_XIP_KERNEL
- __init_begin = _stext;
- INIT_DATA
- ARM_EXIT_KEEP(EXIT_DATA)
-#endif
- }
-
- PERCPU_SECTION(32)
-
-#ifndef CONFIG_XIP_KERNEL
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
-#endif
-
/*
* unwind exit sections must be discarded before the rest of the
* unwind sections get included.
@@ -106,10 +55,22 @@ SECTIONS
*(.fixup)
*(__ex_table)
#endif
+#ifndef CONFIG_SMP_ON_UP
+ *(.alt.smp.init)
+#endif
}
+#ifdef CONFIG_XIP_KERNEL
+ . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
+#else
+ . = PAGE_OFFSET + TEXT_OFFSET;
+#endif
+ .head.text : {
+ _text = .;
+ HEAD_TEXT
+ }
.text : { /* Real text segment */
- _text = .; /* Text and read-only data */
+ _stext = .; /* Text and read-only data */
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
@@ -122,8 +83,6 @@ SECTIONS
*(.fixup)
#endif
*(.gnu.warning)
- *(.rodata)
- *(.rodata.*)
*(.glue_7)
*(.glue_7t)
. = ALIGN(4);
@@ -152,10 +111,63 @@ SECTIONS
_etext = .; /* End of text and rodata section */
+#ifndef CONFIG_XIP_KERNEL
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+#endif
+
+ INIT_TEXT_SECTION(8)
+ .exit.text : {
+ ARM_EXIT_KEEP(EXIT_TEXT)
+ }
+ .init.proc.info : {
+ ARM_CPU_DISCARD(PROC_INFO)
+ }
+ .init.arch.info : {
+ __arch_info_begin = .;
+ *(.arch.info.init)
+ __arch_info_end = .;
+ }
+ .init.tagtable : {
+ __tagtable_begin = .;
+ *(.taglist.init)
+ __tagtable_end = .;
+ }
+#ifdef CONFIG_SMP_ON_UP
+ .init.smpalt : {
+ __smpalt_begin = .;
+ *(.alt.smp.init)
+ __smpalt_end = .;
+ }
+#endif
+ .init.pv_table : {
+ __pv_table_begin = .;
+ *(.pv_table)
+ __pv_table_end = .;
+ }
+ .init.data : {
+#ifndef CONFIG_XIP_KERNEL
+ INIT_DATA
+#endif
+ INIT_SETUP(16)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
+ }
+#ifndef CONFIG_XIP_KERNEL
+ .exit.data : {
+ ARM_EXIT_KEEP(EXIT_DATA)
+ }
+#endif
+
+ PERCPU_SECTION(32)
+
#ifdef CONFIG_XIP_KERNEL
__data_loc = ALIGN(4); /* location in binary */
. = PAGE_OFFSET + TEXT_OFFSET;
#else
+ __init_end = .;
. = ALIGN(THREAD_SIZE);
__data_loc = .;
#endif
@@ -270,12 +282,6 @@ SECTIONS
/* Default discards */
DISCARDS
-
-#ifndef CONFIG_SMP_ON_UP
- /DISCARD/ : {
- *(.alt.smp.init)
- }
-#endif
}
/*
diff --git a/arch/arm/mach-bcmring/include/mach/entry-macro.S b/arch/arm/mach-bcmring/include/mach/entry-macro.S
index 7d393ca010ac..94c950d783ba 100644
--- a/arch/arm/mach-bcmring/include/mach/entry-macro.S
+++ b/arch/arm/mach-bcmring/include/mach/entry-macro.S
@@ -80,7 +80,3 @@
.macro arch_ret_to_user, tmp1, tmp2
.endm
-
- .macro irq_prio_table
- .endm
-
diff --git a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
index 4d381ec05278..1dd231d2f772 100644
--- a/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
+++ b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h
@@ -8,4 +8,4 @@
* published by the Free Software Foundation.
*/
-#define VMALLOC_END 0xd8000000
+#define VMALLOC_END 0xd8000000UL
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 8bc3701aa05c..84fd78684868 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -681,4 +681,5 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da830_evm_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index a7b41bf505f1..29671ef07152 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1261,4 +1261,5 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da850_evm_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 6e7cad13352c..241a6bd67408 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -356,4 +356,5 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_evm_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 543f9911b281..bee284ca7fd6 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -275,4 +275,5 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_leopard_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 09a87e61ffcf..9818f214d4f0 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -617,5 +617,6 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm365_evm_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 556bbd468db3..95607a191e03 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -717,4 +717,5 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_evm_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index f6ac9ba74878..993a3146fd35 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -719,9 +719,15 @@ static void __init cdce_clk_init(void)
}
}
+#define DM6467T_EVM_REF_FREQ 33000000
+
static void __init davinci_map_io(void)
{
dm646x_init();
+
+ if (machine_is_davinci_dm6467tevm())
+ davinci_set_refclk_rate(DM6467T_EVM_REF_FREQ);
+
cdce_clk_init();
}
@@ -785,23 +791,13 @@ static __init void evm_init(void)
soc_info->emac_pdata->phy_id = DM646X_EVM_PHY_ID;
}
-#define DM646X_EVM_REF_FREQ 27000000
-#define DM6467T_EVM_REF_FREQ 33000000
-
-void __init dm646x_board_setup_refclk(struct clk *clk)
-{
- if (machine_is_davinci_dm6467tevm())
- clk->rate = DM6467T_EVM_REF_FREQ;
- else
- clk->rate = DM646X_EVM_REF_FREQ;
-}
-
MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
.boot_params = (0x80000100),
.map_io = davinci_map_io,
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = evm_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
@@ -810,5 +806,6 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = evm_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 5f5d78308873..c278226627ad 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -571,4 +571,5 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = mityomapl138_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 3e7be2de96de..d60a80028ba3 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -277,4 +277,5 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_ntosd2_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 67c38d0ecd10..237332a11421 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -343,4 +343,5 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = omapl138_hawk_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 61ac96d8f00d..5f4385c0a089 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -156,4 +156,5 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_sffsdr_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index 1a656e882262..782892065682 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -282,4 +282,5 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = tnetv107x_evm_board_init,
+ .dma_zone_size = SZ_128M,
MACHINE_END
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index e4e3af179f02..ae653194b645 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -368,6 +368,12 @@ static unsigned long clk_leafclk_recalc(struct clk *clk)
return clk->parent->rate;
}
+int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
+{
+ clk->rate = rate;
+ return 0;
+}
+
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
@@ -506,6 +512,38 @@ int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
}
EXPORT_SYMBOL(davinci_set_pllrate);
+/**
+ * davinci_set_refclk_rate() - Set the reference clock rate
+ * @rate: The new rate.
+ *
+ * Sets the reference clock rate to a given value. This will most likely
+ * result in the entire clock tree getting updated.
+ *
+ * This is used to support boards which use a reference clock different
+ * from the one used by default in the <soc>.c file. The reference clock rate
+ * should be updated early in the boot process; ideally soon after the
+ * clock tree has been initialized once with the default reference clock
+ * rate (davinci_common_init()).
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int davinci_set_refclk_rate(unsigned long rate)
+{
+ struct clk *refclk;
+
+ refclk = clk_get(NULL, "ref");
+ if (IS_ERR(refclk)) {
+ pr_err("%s: failed to get reference clock.\n", __func__);
+ return PTR_ERR(refclk);
+ }
+
+ clk_set_rate(refclk, rate);
+
+ clk_put(refclk);
+
+ return 0;
+}
+
int __init davinci_clk_init(struct clk_lookup *clocks)
{
struct clk_lookup *c;
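
The DM646x hunks above show the intended usage: call davinci_set_refclk_rate() from the board's map_io, right after the SoC init has built the clock tree at the default rate. A minimal sketch for a hypothetical board fitted with a 25 MHz reference crystal (the board name and frequency are illustrative):

#define MYBOARD_REF_FREQ	25000000	/* hypothetical 25 MHz reference */

static void __init myboard_map_io(void)
{
	dm646x_init();		/* clock tree initialised at the default ref rate */
	davinci_set_refclk_rate(MYBOARD_REF_FREQ);
}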
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index 0dd22031ec62..50b2482e0ba2 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -123,6 +123,8 @@ int davinci_clk_init(struct clk_lookup *clocks);
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
unsigned int mult, unsigned int postdiv);
int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
+int davinci_set_refclk_rate(unsigned long rate);
+int davinci_simple_set_rate(struct clk *clk, unsigned long rate);
extern struct platform_device davinci_wdt_device;
extern void davinci_watchdog_reset(struct platform_device *);
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index e00d61e2efbe..1802e711a2b8 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -43,6 +43,7 @@
/*
* Device specific clocks
*/
+#define DM646X_REF_FREQ 27000000
#define DM646X_AUX_FREQ 24000000
static struct pll_data pll1_data = {
@@ -57,6 +58,8 @@ static struct pll_data pll2_data = {
static struct clk ref_clk = {
.name = "ref_clk",
+ .rate = DM646X_REF_FREQ,
+ .set_rate = davinci_simple_set_rate,
};
static struct clk aux_clkin = {
@@ -902,7 +905,6 @@ int __init dm646x_init_edma(struct edma_rsv_info *rsv)
void __init dm646x_init(void)
{
- dm646x_board_setup_refclk(&ref_clk);
davinci_common_init(&davinci_soc_info_dm646x);
}
diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
index 7a27f3f13913..2a00fe5ac253 100644
--- a/arch/arm/mach-davinci/include/mach/dm646x.h
+++ b/arch/arm/mach-davinci/include/mach/dm646x.h
@@ -15,7 +15,6 @@
#include <mach/asp.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
-#include <linux/clk.h>
#include <linux/davinci_emac.h>
#define DM646X_EMAC_BASE (0x01C80000)
@@ -31,7 +30,6 @@
void __init dm646x_init(void);
void __init dm646x_init_mcasp0(struct snd_platform_data *pdata);
void __init dm646x_init_mcasp1(struct snd_platform_data *pdata);
-void __init dm646x_board_setup_refclk(struct clk *clk);
int __init dm646x_init_edma(struct edma_rsv_info *rsv);
void dm646x_video_init(void);
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S
index fbdebc7cb409..e14c0dc0e12c 100644
--- a/arch/arm/mach-davinci/include/mach/entry-macro.S
+++ b/arch/arm/mach-davinci/include/mach/entry-macro.S
@@ -46,6 +46,3 @@
#endif
1002:
.endm
-
- .macro irq_prio_table
- .endm
diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h
index 491249ef209c..78731944a70c 100644
--- a/arch/arm/mach-davinci/include/mach/memory.h
+++ b/arch/arm/mach-davinci/include/mach/memory.h
@@ -41,11 +41,4 @@
*/
#define CONSISTENT_DMA_SIZE (14<<20)
-/*
- * Restrict DMA-able region to workaround silicon bug. The bug
- * restricts buffers available for DMA to video hardware to be
- * below 128M
- */
-#define ARM_DMA_ZONE_SIZE SZ_128M
-
#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index a47e6f29206e..1110fdd77ba4 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -30,47 +30,47 @@
#define DAVINCI_PWR_SLEEP_CNTRL_BASE 0x01C41000
/* Power and Sleep Controller (PSC) Domains */
-#define DAVINCI_GPSC_ARMDOMAIN 0
-#define DAVINCI_GPSC_DSPDOMAIN 1
+#define DAVINCI_GPSC_ARMDOMAIN 0
+#define DAVINCI_GPSC_DSPDOMAIN 1
-#define DAVINCI_LPSC_VPSSMSTR 0
-#define DAVINCI_LPSC_VPSSSLV 1
-#define DAVINCI_LPSC_TPCC 2
-#define DAVINCI_LPSC_TPTC0 3
-#define DAVINCI_LPSC_TPTC1 4
-#define DAVINCI_LPSC_EMAC 5
-#define DAVINCI_LPSC_EMAC_WRAPPER 6
-#define DAVINCI_LPSC_USB 9
-#define DAVINCI_LPSC_ATA 10
-#define DAVINCI_LPSC_VLYNQ 11
-#define DAVINCI_LPSC_UHPI 12
-#define DAVINCI_LPSC_DDR_EMIF 13
-#define DAVINCI_LPSC_AEMIF 14
-#define DAVINCI_LPSC_MMC_SD 15
-#define DAVINCI_LPSC_McBSP 17
-#define DAVINCI_LPSC_I2C 18
-#define DAVINCI_LPSC_UART0 19
-#define DAVINCI_LPSC_UART1 20
-#define DAVINCI_LPSC_UART2 21
-#define DAVINCI_LPSC_SPI 22
-#define DAVINCI_LPSC_PWM0 23
-#define DAVINCI_LPSC_PWM1 24
-#define DAVINCI_LPSC_PWM2 25
-#define DAVINCI_LPSC_GPIO 26
-#define DAVINCI_LPSC_TIMER0 27
-#define DAVINCI_LPSC_TIMER1 28
-#define DAVINCI_LPSC_TIMER2 29
-#define DAVINCI_LPSC_SYSTEM_SUBSYS 30
-#define DAVINCI_LPSC_ARM 31
-#define DAVINCI_LPSC_SCR2 32
-#define DAVINCI_LPSC_SCR3 33
-#define DAVINCI_LPSC_SCR4 34
-#define DAVINCI_LPSC_CROSSBAR 35
-#define DAVINCI_LPSC_CFG27 36
-#define DAVINCI_LPSC_CFG3 37
-#define DAVINCI_LPSC_CFG5 38
-#define DAVINCI_LPSC_GEM 39
-#define DAVINCI_LPSC_IMCOP 40
+#define DAVINCI_LPSC_VPSSMSTR 0
+#define DAVINCI_LPSC_VPSSSLV 1
+#define DAVINCI_LPSC_TPCC 2
+#define DAVINCI_LPSC_TPTC0 3
+#define DAVINCI_LPSC_TPTC1 4
+#define DAVINCI_LPSC_EMAC 5
+#define DAVINCI_LPSC_EMAC_WRAPPER 6
+#define DAVINCI_LPSC_USB 9
+#define DAVINCI_LPSC_ATA 10
+#define DAVINCI_LPSC_VLYNQ 11
+#define DAVINCI_LPSC_UHPI 12
+#define DAVINCI_LPSC_DDR_EMIF 13
+#define DAVINCI_LPSC_AEMIF 14
+#define DAVINCI_LPSC_MMC_SD 15
+#define DAVINCI_LPSC_McBSP 17
+#define DAVINCI_LPSC_I2C 18
+#define DAVINCI_LPSC_UART0 19
+#define DAVINCI_LPSC_UART1 20
+#define DAVINCI_LPSC_UART2 21
+#define DAVINCI_LPSC_SPI 22
+#define DAVINCI_LPSC_PWM0 23
+#define DAVINCI_LPSC_PWM1 24
+#define DAVINCI_LPSC_PWM2 25
+#define DAVINCI_LPSC_GPIO 26
+#define DAVINCI_LPSC_TIMER0 27
+#define DAVINCI_LPSC_TIMER1 28
+#define DAVINCI_LPSC_TIMER2 29
+#define DAVINCI_LPSC_SYSTEM_SUBSYS 30
+#define DAVINCI_LPSC_ARM 31
+#define DAVINCI_LPSC_SCR2 32
+#define DAVINCI_LPSC_SCR3 33
+#define DAVINCI_LPSC_SCR4 34
+#define DAVINCI_LPSC_CROSSBAR 35
+#define DAVINCI_LPSC_CFG27 36
+#define DAVINCI_LPSC_CFG3 37
+#define DAVINCI_LPSC_CFG5 38
+#define DAVINCI_LPSC_GEM 39
+#define DAVINCI_LPSC_IMCOP 40
#define DM355_LPSC_TIMER3 5
#define DM355_LPSC_SPI1 6
@@ -102,39 +102,39 @@
/*
* LPSC Assignments
*/
-#define DM646X_LPSC_ARM 0
-#define DM646X_LPSC_C64X_CPU 1
-#define DM646X_LPSC_HDVICP0 2
-#define DM646X_LPSC_HDVICP1 3
-#define DM646X_LPSC_TPCC 4
-#define DM646X_LPSC_TPTC0 5
-#define DM646X_LPSC_TPTC1 6
-#define DM646X_LPSC_TPTC2 7
-#define DM646X_LPSC_TPTC3 8
-#define DM646X_LPSC_PCI 13
-#define DM646X_LPSC_EMAC 14
-#define DM646X_LPSC_VDCE 15
-#define DM646X_LPSC_VPSSMSTR 16
-#define DM646X_LPSC_VPSSSLV 17
-#define DM646X_LPSC_TSIF0 18
-#define DM646X_LPSC_TSIF1 19
-#define DM646X_LPSC_DDR_EMIF 20
-#define DM646X_LPSC_AEMIF 21
-#define DM646X_LPSC_McASP0 22
-#define DM646X_LPSC_McASP1 23
-#define DM646X_LPSC_CRGEN0 24
-#define DM646X_LPSC_CRGEN1 25
-#define DM646X_LPSC_UART0 26
-#define DM646X_LPSC_UART1 27
-#define DM646X_LPSC_UART2 28
-#define DM646X_LPSC_PWM0 29
-#define DM646X_LPSC_PWM1 30
-#define DM646X_LPSC_I2C 31
-#define DM646X_LPSC_SPI 32
-#define DM646X_LPSC_GPIO 33
-#define DM646X_LPSC_TIMER0 34
-#define DM646X_LPSC_TIMER1 35
-#define DM646X_LPSC_ARM_INTC 45
+#define DM646X_LPSC_ARM 0
+#define DM646X_LPSC_C64X_CPU 1
+#define DM646X_LPSC_HDVICP0 2
+#define DM646X_LPSC_HDVICP1 3
+#define DM646X_LPSC_TPCC 4
+#define DM646X_LPSC_TPTC0 5
+#define DM646X_LPSC_TPTC1 6
+#define DM646X_LPSC_TPTC2 7
+#define DM646X_LPSC_TPTC3 8
+#define DM646X_LPSC_PCI 13
+#define DM646X_LPSC_EMAC 14
+#define DM646X_LPSC_VDCE 15
+#define DM646X_LPSC_VPSSMSTR 16
+#define DM646X_LPSC_VPSSSLV 17
+#define DM646X_LPSC_TSIF0 18
+#define DM646X_LPSC_TSIF1 19
+#define DM646X_LPSC_DDR_EMIF 20
+#define DM646X_LPSC_AEMIF 21
+#define DM646X_LPSC_McASP0 22
+#define DM646X_LPSC_McASP1 23
+#define DM646X_LPSC_CRGEN0 24
+#define DM646X_LPSC_CRGEN1 25
+#define DM646X_LPSC_UART0 26
+#define DM646X_LPSC_UART1 27
+#define DM646X_LPSC_UART2 28
+#define DM646X_LPSC_PWM0 29
+#define DM646X_LPSC_PWM1 30
+#define DM646X_LPSC_I2C 31
+#define DM646X_LPSC_SPI 32
+#define DM646X_LPSC_GPIO 33
+#define DM646X_LPSC_TIMER0 34
+#define DM646X_LPSC_TIMER1 35
+#define DM646X_LPSC_ARM_INTC 45
/* PSC0 defines */
#define DA8XX_LPSC0_TPCC 0
@@ -243,7 +243,7 @@
#define PSC_STATE_DISABLE 2
#define PSC_STATE_ENABLE 3
-#define MDSTAT_STATE_MASK 0x1f
+#define MDSTAT_STATE_MASK 0x1f
#ifndef __ASSEMBLER__
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index 1435fc31c4b2..ae433a052df6 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -110,6 +110,8 @@ config MACH_SMDKC210
select S3C_DEV_HSMMC1
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
+ select SAMSUNG_DEV_PWM
+ select SAMSUNG_DEV_BACKLIGHT
select EXYNOS4_DEV_PD
select EXYNOS4_DEV_SYSMMU
select EXYNOS4_SETUP_I2C1
@@ -127,8 +129,10 @@ config MACH_SMDKV310
select S3C_DEV_HSMMC1
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
+ select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_KEYPAD
select EXYNOS4_DEV_PD
+ select SAMSUNG_DEV_PWM
select EXYNOS4_DEV_SYSMMU
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_KEYPAD
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
index 60fe5ecf3599..1366995d8c2c 100644
--- a/arch/arm/mach-exynos4/Makefile
+++ b/arch/arm/mach-exynos4/Makefile
@@ -15,7 +15,6 @@ obj- :=
obj-$(CONFIG_CPU_EXYNOS4210) += cpu.o init.o clock.o irq-combiner.o
obj-$(CONFIG_CPU_EXYNOS4210) += setup-i2c0.o irq-eint.o dma.o
obj-$(CONFIG_PM) += pm.o sleep.o
-obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c
index 871f9d508fde..66494f28bbef 100644
--- a/arch/arm/mach-exynos4/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -27,24 +27,20 @@
static struct clk clk_sclk_hdmi27m = {
.name = "sclk_hdmi27m",
- .id = -1,
.rate = 27000000,
};
static struct clk clk_sclk_hdmiphy = {
.name = "sclk_hdmiphy",
- .id = -1,
};
static struct clk clk_sclk_usbphy0 = {
.name = "sclk_usbphy0",
- .id = -1,
.rate = 27000000,
};
static struct clk clk_sclk_usbphy1 = {
.name = "sclk_usbphy1",
- .id = -1,
};
static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable)
@@ -132,7 +128,6 @@ static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable)
static struct clksrc_clk clk_mout_apll = {
.clk = {
.name = "mout_apll",
- .id = -1,
},
.sources = &clk_src_apll,
.reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 0, .size = 1 },
@@ -141,7 +136,6 @@ static struct clksrc_clk clk_mout_apll = {
static struct clksrc_clk clk_sclk_apll = {
.clk = {
.name = "sclk_apll",
- .id = -1,
.parent = &clk_mout_apll.clk,
},
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 24, .size = 3 },
@@ -150,7 +144,6 @@ static struct clksrc_clk clk_sclk_apll = {
static struct clksrc_clk clk_mout_epll = {
.clk = {
.name = "mout_epll",
- .id = -1,
},
.sources = &clk_src_epll,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 4, .size = 1 },
@@ -159,7 +152,6 @@ static struct clksrc_clk clk_mout_epll = {
static struct clksrc_clk clk_mout_mpll = {
.clk = {
.name = "mout_mpll",
- .id = -1,
},
.sources = &clk_src_mpll,
.reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 8, .size = 1 },
@@ -178,7 +170,6 @@ static struct clksrc_sources clkset_moutcore = {
static struct clksrc_clk clk_moutcore = {
.clk = {
.name = "moutcore",
- .id = -1,
},
.sources = &clkset_moutcore,
.reg_src = { .reg = S5P_CLKSRC_CPU, .shift = 16, .size = 1 },
@@ -187,7 +178,6 @@ static struct clksrc_clk clk_moutcore = {
static struct clksrc_clk clk_coreclk = {
.clk = {
.name = "core_clk",
- .id = -1,
.parent = &clk_moutcore.clk,
},
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 0, .size = 3 },
@@ -196,7 +186,6 @@ static struct clksrc_clk clk_coreclk = {
static struct clksrc_clk clk_armclk = {
.clk = {
.name = "armclk",
- .id = -1,
.parent = &clk_coreclk.clk,
},
};
@@ -204,7 +193,6 @@ static struct clksrc_clk clk_armclk = {
static struct clksrc_clk clk_aclk_corem0 = {
.clk = {
.name = "aclk_corem0",
- .id = -1,
.parent = &clk_coreclk.clk,
},
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 4, .size = 3 },
@@ -213,7 +201,6 @@ static struct clksrc_clk clk_aclk_corem0 = {
static struct clksrc_clk clk_aclk_cores = {
.clk = {
.name = "aclk_cores",
- .id = -1,
.parent = &clk_coreclk.clk,
},
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 4, .size = 3 },
@@ -222,7 +209,6 @@ static struct clksrc_clk clk_aclk_cores = {
static struct clksrc_clk clk_aclk_corem1 = {
.clk = {
.name = "aclk_corem1",
- .id = -1,
.parent = &clk_coreclk.clk,
},
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 8, .size = 3 },
@@ -231,7 +217,6 @@ static struct clksrc_clk clk_aclk_corem1 = {
static struct clksrc_clk clk_periphclk = {
.clk = {
.name = "periphclk",
- .id = -1,
.parent = &clk_coreclk.clk,
},
.reg_div = { .reg = S5P_CLKDIV_CPU, .shift = 12, .size = 3 },
@@ -252,7 +237,6 @@ static struct clksrc_sources clkset_mout_corebus = {
static struct clksrc_clk clk_mout_corebus = {
.clk = {
.name = "mout_corebus",
- .id = -1,
},
.sources = &clkset_mout_corebus,
.reg_src = { .reg = S5P_CLKSRC_DMC, .shift = 4, .size = 1 },
@@ -261,7 +245,6 @@ static struct clksrc_clk clk_mout_corebus = {
static struct clksrc_clk clk_sclk_dmc = {
.clk = {
.name = "sclk_dmc",
- .id = -1,
.parent = &clk_mout_corebus.clk,
},
.reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 12, .size = 3 },
@@ -270,7 +253,6 @@ static struct clksrc_clk clk_sclk_dmc = {
static struct clksrc_clk clk_aclk_cored = {
.clk = {
.name = "aclk_cored",
- .id = -1,
.parent = &clk_sclk_dmc.clk,
},
.reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 16, .size = 3 },
@@ -279,7 +261,6 @@ static struct clksrc_clk clk_aclk_cored = {
static struct clksrc_clk clk_aclk_corep = {
.clk = {
.name = "aclk_corep",
- .id = -1,
.parent = &clk_aclk_cored.clk,
},
.reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 20, .size = 3 },
@@ -288,7 +269,6 @@ static struct clksrc_clk clk_aclk_corep = {
static struct clksrc_clk clk_aclk_acp = {
.clk = {
.name = "aclk_acp",
- .id = -1,
.parent = &clk_mout_corebus.clk,
},
.reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 0, .size = 3 },
@@ -297,7 +277,6 @@ static struct clksrc_clk clk_aclk_acp = {
static struct clksrc_clk clk_pclk_acp = {
.clk = {
.name = "pclk_acp",
- .id = -1,
.parent = &clk_aclk_acp.clk,
},
.reg_div = { .reg = S5P_CLKDIV_DMC0, .shift = 4, .size = 3 },
@@ -318,7 +297,6 @@ static struct clksrc_sources clkset_aclk = {
static struct clksrc_clk clk_aclk_200 = {
.clk = {
.name = "aclk_200",
- .id = -1,
},
.sources = &clkset_aclk,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 12, .size = 1 },
@@ -328,7 +306,6 @@ static struct clksrc_clk clk_aclk_200 = {
static struct clksrc_clk clk_aclk_100 = {
.clk = {
.name = "aclk_100",
- .id = -1,
},
.sources = &clkset_aclk,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 16, .size = 1 },
@@ -338,7 +315,6 @@ static struct clksrc_clk clk_aclk_100 = {
static struct clksrc_clk clk_aclk_160 = {
.clk = {
.name = "aclk_160",
- .id = -1,
},
.sources = &clkset_aclk,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 20, .size = 1 },
@@ -348,7 +324,6 @@ static struct clksrc_clk clk_aclk_160 = {
static struct clksrc_clk clk_aclk_133 = {
.clk = {
.name = "aclk_133",
- .id = -1,
},
.sources = &clkset_aclk,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 24, .size = 1 },
@@ -368,7 +343,6 @@ static struct clksrc_sources clkset_vpllsrc = {
static struct clksrc_clk clk_vpllsrc = {
.clk = {
.name = "vpll_src",
- .id = -1,
.enable = exynos4_clksrc_mask_top_ctrl,
.ctrlbit = (1 << 0),
},
@@ -389,7 +363,6 @@ static struct clksrc_sources clkset_sclk_vpll = {
static struct clksrc_clk clk_sclk_vpll = {
.clk = {
.name = "sclk_vpll",
- .id = -1,
},
.sources = &clkset_sclk_vpll,
.reg_src = { .reg = S5P_CLKSRC_TOP0, .shift = 8, .size = 1 },
@@ -398,161 +371,151 @@ static struct clksrc_clk clk_sclk_vpll = {
static struct clk init_clocks_off[] = {
{
.name = "timers",
- .id = -1,
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1<<24),
}, {
.name = "csis",
- .id = 0,
+ .devname = "s5p-mipi-csis.0",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "csis",
- .id = 1,
+ .devname = "s5p-mipi-csis.1",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "fimc",
- .id = 0,
+ .devname = "exynos4-fimc.0",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "fimc",
- .id = 1,
+ .devname = "exynos4-fimc.1",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "fimc",
- .id = 2,
+ .devname = "exynos4-fimc.2",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "fimc",
- .id = 3,
+ .devname = "exynos4-fimc.3",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "fimd",
- .id = 0,
+ .devname = "exynos4-fb.0",
.enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "fimd",
- .id = 1,
+ .devname = "exynos4-fb.1",
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "sataphy",
- .id = -1,
.parent = &clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "hsmmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "hsmmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "hsmmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "hsmmc",
- .id = 3,
+ .devname = "s3c-sdhci.3",
.parent = &clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 8),
}, {
- .name = "hsmmc",
- .id = 4,
+ .name = "dwmmc",
.parent = &clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "sata",
- .id = -1,
.parent = &clk_aclk_133.clk,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "pdma",
- .id = 0,
+ .devname = "s3c-pl330.0",
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "pdma",
- .id = 1,
+ .devname = "s3c-pl330.1",
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "adc",
- .id = -1,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 15),
}, {
.name = "keypad",
- .id = -1,
.enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "rtc",
- .id = -1,
.enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 15),
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_perir_ctrl,
.ctrlbit = (1 << 14),
}, {
.name = "usbhost",
- .id = -1,
.enable = exynos4_clk_ip_fsys_ctrl ,
.ctrlbit = (1 << 12),
}, {
.name = "otg",
- .id = -1,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 13),
}, {
.name = "spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "spi",
- .id = 2,
+ .devname = "s3c64xx-spi.2",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "iis",
- .id = 0,
+ .devname = "samsung-i2s.0",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 19),
}, {
.name = "iis",
- .id = 1,
+ .devname = "samsung-i2s.1",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 20),
}, {
.name = "iis",
- .id = 2,
+ .devname = "samsung-i2s.2",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 21),
}, {
@@ -562,125 +525,110 @@ static struct clk init_clocks_off[] = {
.ctrlbit = (1 << 27),
}, {
.name = "fimg2d",
- .id = -1,
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "i2c",
- .id = 0,
+ .devname = "s3c2440-i2c.0",
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "i2c",
- .id = 1,
+ .devname = "s3c2440-i2c.1",
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "i2c",
- .id = 2,
+ .devname = "s3c2440-i2c.2",
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "i2c",
- .id = 3,
+ .devname = "s3c2440-i2c.3",
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "i2c",
- .id = 4,
+ .devname = "s3c2440-i2c.4",
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "i2c",
- .id = 5,
+ .devname = "s3c2440-i2c.5",
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "i2c",
- .id = 6,
+ .devname = "s3c2440-i2c.6",
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "i2c",
- .id = 7,
+ .devname = "s3c2440-i2c.7",
.parent = &clk_aclk_100.clk,
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 13),
}, {
.name = "SYSMMU_MDMA",
- .id = -1,
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "SYSMMU_FIMC0",
- .id = -1,
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "SYSMMU_FIMC1",
- .id = -1,
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "SYSMMU_FIMC2",
- .id = -1,
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "SYSMMU_FIMC3",
- .id = -1,
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "SYSMMU_JPEG",
- .id = -1,
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "SYSMMU_FIMD0",
- .id = -1,
.enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_FIMD1",
- .id = -1,
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_PCIe",
- .id = -1,
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "SYSMMU_G2D",
- .id = -1,
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "SYSMMU_ROTATOR",
- .id = -1,
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_TV",
- .id = -1,
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_MFC_L",
- .id = -1,
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "SYSMMU_MFC_R",
- .id = -1,
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 2),
}
@@ -689,32 +637,32 @@ static struct clk init_clocks_off[] = {
static struct clk init_clocks[] = {
{
.name = "uart",
- .id = 0,
+ .devname = "s5pv210-uart.0",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "uart",
- .id = 1,
+ .devname = "s5pv210-uart.1",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "uart",
- .id = 2,
+ .devname = "s5pv210-uart.2",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "uart",
- .id = 3,
+ .devname = "s5pv210-uart.3",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "uart",
- .id = 4,
+ .devname = "s5pv210-uart.4",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "uart",
- .id = 5,
+ .devname = "s5pv210-uart.5",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 5),
}
@@ -750,7 +698,6 @@ static struct clksrc_sources clkset_mout_g2d0 = {
static struct clksrc_clk clk_mout_g2d0 = {
.clk = {
.name = "mout_g2d0",
- .id = -1,
},
.sources = &clkset_mout_g2d0,
.reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 0, .size = 1 },
@@ -769,7 +716,6 @@ static struct clksrc_sources clkset_mout_g2d1 = {
static struct clksrc_clk clk_mout_g2d1 = {
.clk = {
.name = "mout_g2d1",
- .id = -1,
},
.sources = &clkset_mout_g2d1,
.reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 4, .size = 1 },
@@ -788,7 +734,6 @@ static struct clksrc_sources clkset_mout_g2d = {
static struct clksrc_clk clk_dout_mmc0 = {
.clk = {
.name = "dout_mmc0",
- .id = -1,
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 0, .size = 4 },
@@ -798,7 +743,6 @@ static struct clksrc_clk clk_dout_mmc0 = {
static struct clksrc_clk clk_dout_mmc1 = {
.clk = {
.name = "dout_mmc1",
- .id = -1,
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 4, .size = 4 },
@@ -808,7 +752,6 @@ static struct clksrc_clk clk_dout_mmc1 = {
static struct clksrc_clk clk_dout_mmc2 = {
.clk = {
.name = "dout_mmc2",
- .id = -1,
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 8, .size = 4 },
@@ -818,7 +761,6 @@ static struct clksrc_clk clk_dout_mmc2 = {
static struct clksrc_clk clk_dout_mmc3 = {
.clk = {
.name = "dout_mmc3",
- .id = -1,
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 12, .size = 4 },
@@ -828,7 +770,6 @@ static struct clksrc_clk clk_dout_mmc3 = {
static struct clksrc_clk clk_dout_mmc4 = {
.clk = {
.name = "dout_mmc4",
- .id = -1,
},
.sources = &clkset_group,
.reg_src = { .reg = S5P_CLKSRC_FSYS, .shift = 16, .size = 4 },
@@ -839,7 +780,7 @@ static struct clksrc_clk clksrcs[] = {
{
.clk = {
.name = "uclk1",
- .id = 0,
+ .devname = "s5pv210-uart.0",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 0),
},
@@ -849,7 +790,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = 1,
+ .devname = "s5pv210-uart.1",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 4),
},
@@ -859,7 +800,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = 2,
+ .devname = "s5pv210-uart.2",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 8),
},
@@ -869,7 +810,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = 3,
+ .devname = "s5pv210-uart.3",
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 12),
},
@@ -879,7 +820,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_pwm",
- .id = -1,
.enable = exynos4_clksrc_mask_peril0_ctrl,
.ctrlbit = (1 << 24),
},
@@ -889,7 +829,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_csis",
- .id = 0,
+ .devname = "s5p-mipi-csis.0",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 24),
},
@@ -899,7 +839,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_csis",
- .id = 1,
+ .devname = "s5p-mipi-csis.1",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 28),
},
@@ -909,7 +849,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_cam",
- .id = 0,
+ .devname = "exynos4-fimc.0",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 16),
},
@@ -919,7 +859,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_cam",
- .id = 1,
+ .devname = "exynos4-fimc.1",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 20),
},
@@ -929,7 +869,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 0,
+ .devname = "exynos4-fimc.0",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 0),
},
@@ -939,7 +879,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 1,
+ .devname = "exynos4-fimc.1",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 4),
},
@@ -949,7 +889,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 2,
+ .devname = "exynos4-fimc.2",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 8),
},
@@ -959,7 +899,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 3,
+ .devname = "exynos4-fimc.3",
.enable = exynos4_clksrc_mask_cam_ctrl,
.ctrlbit = (1 << 12),
},
@@ -969,7 +909,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimd",
- .id = 0,
+ .devname = "exynos4-fb.0",
.enable = exynos4_clksrc_mask_lcd0_ctrl,
.ctrlbit = (1 << 0),
},
@@ -979,7 +919,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimd",
- .id = 1,
+ .devname = "exynos4-fb.1",
.enable = exynos4_clksrc_mask_lcd1_ctrl,
.ctrlbit = (1 << 0),
},
@@ -989,7 +929,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_sata",
- .id = -1,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 24),
},
@@ -999,7 +938,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 16),
},
@@ -1009,7 +948,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 20),
},
@@ -1019,7 +958,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 2,
+ .devname = "s3c64xx-spi.2",
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 24),
},
@@ -1029,7 +968,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimg2d",
- .id = -1,
},
.sources = &clkset_mout_g2d,
.reg_src = { .reg = S5P_CLKSRC_IMAGE, .shift = 8, .size = 1 },
@@ -1037,7 +975,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_dout_mmc0.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 0),
@@ -1046,7 +984,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_dout_mmc1.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 4),
@@ -1055,7 +993,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_dout_mmc2.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 8),
@@ -1064,7 +1002,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 3,
+ .devname = "s3c-sdhci.3",
.parent = &clk_dout_mmc3.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 12),
@@ -1072,8 +1010,7 @@ static struct clksrc_clk clksrcs[] = {
.reg_div = { .reg = S5P_CLKDIV_FSYS2, .shift = 24, .size = 8 },
}, {
.clk = {
- .name = "sclk_mmc",
- .id = 4,
+ .name = "sclk_dwmmc",
.parent = &clk_dout_mmc4.clk,
.enable = exynos4_clksrc_mask_fsys_ctrl,
.ctrlbit = (1 << 16),
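
Replacing the numeric .id with .devname moves these entries to clkdev-style matching: a clock is now found by pairing its name (the connection id) with the owning device's name, e.g. "s3c-sdhci.0", instead of by a bare index. A hedged sketch of what a consumer-side lookup looks like under this scheme (the connection id string, probe function and error handling are illustrative, not taken from a specific driver):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* dev_name(&pdev->dev) is e.g. "s3c-sdhci.0", matching .devname above */
	clk = clk_get(&pdev->dev, "hsmmc");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	/* ... use the device ... */
	return 0;
}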
diff --git a/arch/arm/mach-exynos4/cpufreq.c b/arch/arm/mach-exynos4/cpufreq.c
deleted file mode 100644
index a1bd258f0c4d..000000000000
--- a/arch/arm/mach-exynos4/cpufreq.c
+++ /dev/null
@@ -1,569 +0,0 @@
-/* linux/arch/arm/mach-exynos4/cpufreq.c
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS4 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/cpufreq.h>
-
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-#include <mach/regs-mem.h>
-
-#include <plat/clock.h>
-#include <plat/pm.h>
-
-static struct clk *cpu_clk;
-static struct clk *moutcore;
-static struct clk *mout_mpll;
-static struct clk *mout_apll;
-
-static struct regulator *arm_regulator;
-static struct regulator *int_regulator;
-
-static struct cpufreq_freqs freqs;
-static unsigned int memtype;
-
-enum exynos4_memory_type {
- DDR2 = 4,
- LPDDR2,
- DDR3,
-};
-
-enum cpufreq_level_index {
- L0, L1, L2, L3, CPUFREQ_LEVEL_END,
-};
-
-static struct cpufreq_frequency_table exynos4_freq_table[] = {
- {L0, 1000*1000},
- {L1, 800*1000},
- {L2, 400*1000},
- {L3, 100*1000},
- {0, CPUFREQ_TABLE_END},
-};
-
-static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
- /*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL }
- */
-
- /* ARM L0: 1000MHz */
- { 0, 3, 7, 3, 3, 0, 1 },
-
- /* ARM L1: 800MHz */
- { 0, 3, 7, 3, 3, 0, 1 },
-
- /* ARM L2: 400MHz */
- { 0, 1, 3, 1, 3, 0, 1 },
-
- /* ARM L3: 100MHz */
- { 0, 0, 1, 0, 3, 1, 1 },
-};
-
-static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
- /*
- * Clock divider value for following
- * { DIVCOPY, DIVHPM }
- */
-
- /* ARM L0: 1000MHz */
- { 3, 0 },
-
- /* ARM L1: 800MHz */
- { 3, 0 },
-
- /* ARM L2: 400MHz */
- { 3, 0 },
-
- /* ARM L3: 100MHz */
- { 3, 0 },
-};
-
-static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
- /*
- * Clock divider value for following
- * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
- * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
- */
-
- /* DMC L0: 400MHz */
- { 3, 1, 1, 1, 1, 1, 3, 1 },
-
- /* DMC L1: 400MHz */
- { 3, 1, 1, 1, 1, 1, 3, 1 },
-
- /* DMC L2: 266.7MHz */
- { 7, 1, 1, 2, 1, 1, 3, 1 },
-
- /* DMC L3: 200MHz */
- { 7, 1, 1, 3, 1, 1, 3, 1 },
-};
-
-static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
- /*
- * Clock divider value for following
- * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
- */
-
- /* ACLK200 L0: 200MHz */
- { 3, 7, 4, 5, 1 },
-
- /* ACLK200 L1: 200MHz */
- { 3, 7, 4, 5, 1 },
-
- /* ACLK200 L2: 160MHz */
- { 4, 7, 5, 7, 1 },
-
- /* ACLK200 L3: 133.3MHz */
- { 5, 7, 7, 7, 1 },
-};
-
-static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
- /*
- * Clock divider value for following
- * { DIVGDL/R, DIVGPL/R }
- */
-
- /* ACLK_GDL/R L0: 200MHz */
- { 3, 1 },
-
- /* ACLK_GDL/R L1: 200MHz */
- { 3, 1 },
-
- /* ACLK_GDL/R L2: 160MHz */
- { 4, 1 },
-
- /* ACLK_GDL/R L3: 133.3MHz */
- { 5, 1 },
-};
-
-struct cpufreq_voltage_table {
- unsigned int index; /* any */
- unsigned int arm_volt; /* uV */
- unsigned int int_volt;
-};
-
-static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
- {
- .index = L0,
- .arm_volt = 1200000,
- .int_volt = 1100000,
- }, {
- .index = L1,
- .arm_volt = 1100000,
- .int_volt = 1100000,
- }, {
- .index = L2,
- .arm_volt = 1000000,
- .int_volt = 1000000,
- }, {
- .index = L3,
- .arm_volt = 900000,
- .int_volt = 1000000,
- },
-};
-
-static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1000MHz */
- ((250 << 16) | (6 << 8) | 1),
-
- /* APLL FOUT L1: 800MHz */
- ((200 << 16) | (6 << 8) | 1),
-
- /* APLL FOUT L2 : 400MHz */
- ((200 << 16) | (6 << 8) | 2),
-
- /* APLL FOUT L3: 100MHz */
- ((200 << 16) | (6 << 8) | 4),
-};
-
-int exynos4_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
-}
-
-unsigned int exynos4_getspeed(unsigned int cpu)
-{
- return clk_get_rate(cpu_clk) / 1000;
-}
-
-void exynos4_set_clkdiv(unsigned int div_index)
-{
- unsigned int tmp;
-
- /* Change Divider - CPU0 */
-
- tmp = __raw_readl(S5P_CLKDIV_CPU);
-
- tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
- S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
- S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
- S5P_CLKDIV_CPU0_APLL_MASK);
-
- tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_CPU);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STATCPU);
- } while (tmp & 0x1111111);
-
- /* Change Divider - CPU1 */
-
- tmp = __raw_readl(S5P_CLKDIV_CPU1);
-
- tmp &= ~((0x7 << 4) | 0x7);
-
- tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
- (clkdiv_cpu1[div_index][1] << 0));
-
- __raw_writel(tmp, S5P_CLKDIV_CPU1);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
- } while (tmp & 0x11);
-
- /* Change Divider - DMC0 */
-
- tmp = __raw_readl(S5P_CLKDIV_DMC0);
-
- tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
- S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
- S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
- S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
-
- tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
- (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
- (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
- (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
- (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
- (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
- (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
- (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_DMC0);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
- } while (tmp & 0x11111111);
-
- /* Change Divider - TOP */
-
- tmp = __raw_readl(S5P_CLKDIV_TOP);
-
- tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
- S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
- S5P_CLKDIV_TOP_ONENAND_MASK);
-
- tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
- (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
- (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
- (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
- (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_TOP);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
- } while (tmp & 0x11111);
-
- /* Change Divider - LEFTBUS */
-
- tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
-
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
- (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - RIGHTBUS */
-
- tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
-
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
- (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
- } while (tmp & 0x11);
-}
-
-static void exynos4_set_apll(unsigned int index)
-{
- unsigned int tmp;
-
- /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
- clk_set_parent(moutcore, mout_mpll);
-
- do {
- tmp = (__raw_readl(S5P_CLKMUX_STATCPU)
- >> S5P_CLKSRC_CPU_MUXCORE_SHIFT);
- tmp &= 0x7;
- } while (tmp != 0x2);
-
- /* 2. Set APLL Lock time */
- __raw_writel(S5P_APLL_LOCKTIME, S5P_APLL_LOCK);
-
- /* 3. Change PLL PMS values */
- tmp = __raw_readl(S5P_APLL_CON0);
- tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4_apll_pms_table[index];
- __raw_writel(tmp, S5P_APLL_CON0);
-
- /* 4. wait_lock_time */
- do {
- tmp = __raw_readl(S5P_APLL_CON0);
- } while (!(tmp & (0x1 << S5P_APLLCON0_LOCKED_SHIFT)));
-
- /* 5. MUX_CORE_SEL = APLL */
- clk_set_parent(moutcore, mout_apll);
-
- do {
- tmp = __raw_readl(S5P_CLKMUX_STATCPU);
- tmp &= S5P_CLKMUX_STATCPU_MUXCORE_MASK;
- } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
-}
-
-static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index)
-{
- unsigned int tmp;
-
- if (old_index > new_index) {
- /* The frequency changing to L0 needs to change apll */
- if (freqs.new == exynos4_freq_table[L0].frequency) {
- /* 1. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
-
- /* 2. Change the apll m,p,s value */
- exynos4_set_apll(new_index);
- } else {
- /* 1. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
-
- /* 2. Change just s value in apll m,p,s value */
- tmp = __raw_readl(S5P_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
- __raw_writel(tmp, S5P_APLL_CON0);
- }
- }
-
- else if (old_index < new_index) {
- /* The frequency changing from L0 needs to change apll */
- if (freqs.old == exynos4_freq_table[L0].frequency) {
- /* 1. Change the apll m,p,s value */
- exynos4_set_apll(new_index);
-
- /* 2. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
- } else {
- /* 1. Change just s value in apll m,p,s value */
- tmp = __raw_readl(S5P_APLL_CON0);
- tmp &= ~(0x7 << 0);
- tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
- __raw_writel(tmp, S5P_APLL_CON0);
-
- /* 2. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
- }
- }
-}
-
-static int exynos4_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int index, old_index;
- unsigned int arm_volt, int_volt;
-
- freqs.old = exynos4_getspeed(policy->cpu);
-
- if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
- freqs.old, relation, &old_index))
- return -EINVAL;
-
- if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
- target_freq, relation, &index))
- return -EINVAL;
-
- freqs.new = exynos4_freq_table[index].frequency;
- freqs.cpu = policy->cpu;
-
- if (freqs.new == freqs.old)
- return 0;
-
- /* get the voltage value */
- arm_volt = exynos4_volt_table[index].arm_volt;
- int_volt = exynos4_volt_table[index].int_volt;
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- /* control regulator */
- if (freqs.new > freqs.old) {
- /* Voltage up */
- regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
- regulator_set_voltage(int_regulator, int_volt, int_volt);
- }
-
- /* Clock Configuration Procedure */
- exynos4_set_frequency(old_index, index);
-
- /* control regulator */
- if (freqs.new < freqs.old) {
- /* Voltage down */
- regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
- regulator_set_voltage(int_regulator, int_volt, int_volt);
- }
-
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
-static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
-{
- return 0;
-}
-#endif
-
-static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
-
- /* set the transition latency value */
- policy->cpuinfo.transition_latency = 100000;
-
- /*
- * EXYNOS4 multi-core processors has 2 cores
- * that the frequency cannot be set independently.
- * Each cpu is bound to the same speed.
- * So the affected cpu is all of the cpus.
- */
- cpumask_setall(policy->cpus);
-
- return cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
-}
-
-static struct cpufreq_driver exynos4_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = exynos4_verify_speed,
- .target = exynos4_target,
- .get = exynos4_getspeed,
- .init = exynos4_cpufreq_cpu_init,
- .name = "exynos4_cpufreq",
-#ifdef CONFIG_PM
- .suspend = exynos4_cpufreq_suspend,
- .resume = exynos4_cpufreq_resume,
-#endif
-};
-
-static int __init exynos4_cpufreq_init(void)
-{
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
-
- moutcore = clk_get(NULL, "moutcore");
- if (IS_ERR(moutcore))
- goto out;
-
- mout_mpll = clk_get(NULL, "mout_mpll");
- if (IS_ERR(mout_mpll))
- goto out;
-
- mout_apll = clk_get(NULL, "mout_apll");
- if (IS_ERR(mout_apll))
- goto out;
-
- arm_regulator = regulator_get(NULL, "vdd_arm");
- if (IS_ERR(arm_regulator)) {
- printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
- goto out;
- }
-
- int_regulator = regulator_get(NULL, "vdd_int");
- if (IS_ERR(int_regulator)) {
- printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
- goto out;
- }
-
- /*
- * Check DRAM type.
- * Because DVFS level is different according to DRAM type.
- */
- memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
- memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
- memtype &= S5P_DMC0_MEMTYPE_MASK;
-
- if ((memtype < DDR2) && (memtype > DDR3)) {
- printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype);
- goto out;
- } else {
- printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
- }
-
- return cpufreq_register_driver(&exynos4_driver);
-
-out:
- if (!IS_ERR(cpu_clk))
- clk_put(cpu_clk);
-
- if (!IS_ERR(moutcore))
- clk_put(moutcore);
-
- if (!IS_ERR(mout_mpll))
- clk_put(mout_mpll);
-
- if (!IS_ERR(mout_apll))
- clk_put(mout_apll);
-
- if (!IS_ERR(arm_regulator))
- regulator_put(arm_regulator);
-
- if (!IS_ERR(int_regulator))
- regulator_put(int_regulator);
-
- printk(KERN_ERR "%s: failed initialization\n", __func__);
-
- return -EINVAL;
-}
-late_initcall(exynos4_cpufreq_init);
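The removed exynos4_target() above shows the usual DVFS ordering: notify cpufreq, raise the supply before raising the clock, and lower the supply only after the clock has come down, so the core never runs faster than its rail supports. A minimal standalone sketch of that ordering, with stubbed helpers (hypothetical names, not the kernel API):

#include <stdio.h>

/* Stubs standing in for regulator_set_voltage() and the PLL/divider
 * reprogramming done by the removed exynos4_set_frequency(). */
static void set_voltage_uV(unsigned int uV)
{
	printf("voltage   -> %u uV\n", uV);
}

static void set_frequency_kHz(unsigned int khz)
{
	printf("frequency -> %u kHz\n", khz);
}

static void dvfs_transition(unsigned int old_khz, unsigned int new_khz,
			    unsigned int new_uV)
{
	if (new_khz > old_khz)
		set_voltage_uV(new_uV);		/* voltage up before the clock */

	set_frequency_kHz(new_khz);		/* reprogram PLL and dividers */

	if (new_khz < old_khz)
		set_voltage_uV(new_uV);		/* voltage down after the clock */
}

int main(void)
{
	dvfs_transition(800000, 1200000, 1200000);	/* scaling up */
	dvfs_transition(1200000, 800000, 1000000);	/* scaling down */
	return 0;
}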
diff --git a/arch/arm/mach-exynos4/include/mach/clkdev.h b/arch/arm/mach-exynos4/include/mach/clkdev.h
new file mode 100644
index 000000000000..7dffa83d23ff
--- /dev/null
+++ b/arch/arm/mach-exynos4/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_CLKDEV_H__
+#define __MACH_CLKDEV_H__
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do {} while (0)
+
+#endif
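The new header supplies the two hooks the common ARM clkdev code expects from each platform; exynos4 needs no per-clock refcounting, so they are no-ops. A rough standalone model of how a clk_get()-style lookup uses them (simplified, not the real clkdev code):

#include <stdio.h>

struct clk { const char *name; };

/* Mirrors the new mach/clkdev.h: taking a reference always succeeds,
 * dropping one does nothing. */
#define __clk_get(clk)	({ 1; })
#define __clk_put(clk)	do { } while (0)

static struct clk armclk = { .name = "armclk" };

/* Rough model: clkdev looks the clock up, then lets the platform hook
 * veto (or refcount) it; a 0 return from __clk_get() fails the lookup. */
static struct clk *toy_clk_get(const char *name)
{
	struct clk *clk = &armclk;	/* pretend the lookup matched "name" */

	return __clk_get(clk) ? clk : NULL;
}

int main(void)
{
	struct clk *clk = toy_clk_get("armclk");

	printf("clk_get: %s\n", clk ? clk->name : "failed");
	__clk_put(clk);
	return 0;
}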
diff --git a/arch/arm/mach-exynos4/mach-smdkc210.c b/arch/arm/mach-exynos4/mach-smdkc210.c
index e645f7a955f0..f606ea75bf43 100644
--- a/arch/arm/mach-exynos4/mach-smdkc210.c
+++ b/arch/arm/mach-exynos4/mach-smdkc210.c
@@ -15,6 +15,7 @@
#include <linux/smsc911x.h>
#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/pwm_backlight.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -27,6 +28,8 @@
#include <plat/sdhci.h>
#include <plat/iic.h>
#include <plat/pd.h>
+#include <plat/gpio-cfg.h>
+#include <plat/backlight.h>
#include <mach/map.h>
@@ -191,6 +194,17 @@ static void __init smdkc210_smsc911x_init(void)
(0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
}
+/* LCD Backlight data */
+static struct samsung_bl_gpio_info smdkc210_bl_gpio_info = {
+ .no = EXYNOS4_GPD0(1),
+ .func = S3C_GPIO_SFN(2),
+};
+
+static struct platform_pwm_backlight_data smdkc210_bl_data = {
+ .pwm_id = 1,
+ .pwm_period_ns = 1000,
+};
+
static void __init smdkc210_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -210,6 +224,8 @@ static void __init smdkc210_machine_init(void)
s3c_sdhci2_set_platdata(&smdkc210_hsmmc2_pdata);
s3c_sdhci3_set_platdata(&smdkc210_hsmmc3_pdata);
+ samsung_bl_set(&smdkc210_bl_gpio_info, &smdkc210_bl_data);
+
platform_add_devices(smdkc210_devices, ARRAY_SIZE(smdkc210_devices));
}
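The board now describes its backlight with just the PWM output pin and a platform_pwm_backlight_data, and samsung_bl_set() registers the common Samsung backlight device with that data (smdkv310 below gets the same treatment). pwm_period_ns = 1000 selects a 1 MHz PWM carrier; the pwm-backlight driver scales brightness into a duty cycle within that period, roughly as sketched here (standalone arithmetic only; max_brightness of 255 is an assumed default):

#include <stdio.h>

/* Sketch of how a pwm-backlight style driver maps brightness to a duty
 * cycle inside the configured period (assumed max_brightness = 255). */
static unsigned int duty_ns(unsigned int brightness, unsigned int max,
			    unsigned int period_ns)
{
	return brightness * period_ns / max;
}

int main(void)
{
	/* pwm_period_ns = 1000 -> 1 MHz carrier */
	printf("brightness 128/255 -> %u ns high per 1000 ns period\n",
	       duty_ns(128, 255, 1000));
	return 0;
}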
diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c
index edd814110da8..df1107828abd 100644
--- a/arch/arm/mach-exynos4/mach-smdkv310.c
+++ b/arch/arm/mach-exynos4/mach-smdkv310.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/pwm_backlight.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -29,6 +30,8 @@
#include <plat/sdhci.h>
#include <plat/iic.h>
#include <plat/pd.h>
+#include <plat/gpio-cfg.h>
+#include <plat/backlight.h>
#include <mach/map.h>
@@ -209,6 +212,17 @@ static void __init smdkv310_smsc911x_init(void)
(0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
}
+/* LCD Backlight data */
+static struct samsung_bl_gpio_info smdkv310_bl_gpio_info = {
+ .no = EXYNOS4_GPD0(1),
+ .func = S3C_GPIO_SFN(2),
+};
+
+static struct platform_pwm_backlight_data smdkv310_bl_data = {
+ .pwm_id = 1,
+ .pwm_period_ns = 1000,
+};
+
static void __init smdkv310_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -230,6 +244,8 @@ static void __init smdkv310_machine_init(void)
samsung_keypad_set_platdata(&smdkv310_keypad_data);
+ samsung_bl_set(&smdkv310_bl_gpio_info, &smdkv310_bl_data);
+
platform_add_devices(smdkv310_devices, ARRAY_SIZE(smdkv310_devices));
}
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c
index c5e65a02be8d..b68d5bdf04cf 100644
--- a/arch/arm/mach-exynos4/platsmp.c
+++ b/arch/arm/mach-exynos4/platsmp.c
@@ -154,14 +154,6 @@ void __init smp_init_cpus(void)
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
scu_enable(scu_base_addr());
diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c
index 8755ca8dd48d..533c28f758ca 100644
--- a/arch/arm/mach-exynos4/pm.c
+++ b/arch/arm/mach-exynos4/pm.c
@@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = {
SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL),
};
-void exynos4_cpu_suspend(void)
+static int exynos4_cpu_suspend(unsigned long arg)
{
unsigned long tmp;
unsigned long mask = 0xFFFFFFFF;
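The signature change matches the generic ARM cpu_suspend() convention: platform code hands it a "finisher" of the form int fn(unsigned long), which is called after CPU state has been saved and is expected to cut power; if the finisher ever returns, the attempt is treated as failed. A toy model of that convention (stubbed, not the real arch/arm suspend code):

#include <stdio.h>

/* A finisher that "fails": real code would program the PMU and execute
 * WFI here, and normally never return. */
static int finisher_that_fails(unsigned long arg)
{
	return 1;	/* returning at all means power was never removed */
}

static int toy_cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	/* pretend to save CPU context here ... */
	return fn(arg) ? -1 : 0;
}

int main(void)
{
	printf("suspend %s\n",
	       toy_cpu_suspend(0, finisher_that_fails) ? "aborted" : "ok");
	return 0;
}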
diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S
index 6b62425417a6..0984078f1eba 100644
--- a/arch/arm/mach-exynos4/sleep.S
+++ b/arch/arm/mach-exynos4/sleep.S
@@ -33,28 +33,6 @@
.text
/*
- * s3c_cpu_save
- *
- * entry:
- * r1 = v:p offset
- */
-
-ENTRY(s3c_cpu_save)
-
- stmfd sp!, { r3 - r12, lr }
- ldr r3, =resume_with_mmu
- bl cpu_suspend
-
- ldr r0, =pm_cpu_sleep
- ldr r0, [ r0 ]
- mov pc, r0
-
-resume_with_mmu:
- ldmfd sp!, { r3 - r12, pc }
-
- .ltorg
-
- /*
 * sleep magic, to allow the bootloader to check for a valid
* image to resume to. Must be the first word before the
* s3c_cpu_resume entry.
diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c
index 629454d71c8d..65f1bea958e5 100644
--- a/arch/arm/mach-h720x/h7201-eval.c
+++ b/arch/arm/mach-h720x/h7201-eval.c
@@ -33,4 +33,5 @@ MACHINE_START(H7201, "Hynix GMS30C7201")
.map_io = h720x_map_io,
.init_irq = h720x_init_irq,
.timer = &h7201_timer,
+ .dma_zone_size = SZ_256M,
MACHINE_END
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c
index e9f46b696354..884584a09752 100644
--- a/arch/arm/mach-h720x/h7202-eval.c
+++ b/arch/arm/mach-h720x/h7202-eval.c
@@ -76,4 +76,5 @@ MACHINE_START(H7202, "Hynix HMS30C7202")
.init_irq = h7202_init_irq,
.timer = &h7202_timer,
.init_machine = init_eval_h7202,
+ .dma_zone_size = SZ_256M,
MACHINE_END
diff --git a/arch/arm/mach-h720x/include/mach/entry-macro.S b/arch/arm/mach-h720x/include/mach/entry-macro.S
index 6d3b917c4a18..c3948e5ba4a0 100644
--- a/arch/arm/mach-h720x/include/mach/entry-macro.S
+++ b/arch/arm/mach-h720x/include/mach/entry-macro.S
@@ -57,9 +57,6 @@
tst \irqstat, #1 @ bit 0 should be set
.endm
- .macro irq_prio_table
- .endm
-
#else
#error hynix processor selection mismatch
#endif
diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h
index b0b3baec9acf..96dcf50c51d3 100644
--- a/arch/arm/mach-h720x/include/mach/memory.h
+++ b/arch/arm/mach-h720x/include/mach/memory.h
@@ -8,11 +8,4 @@
#define __ASM_ARCH_MEMORY_H
#define PLAT_PHYS_OFFSET UL(0x40000000)
-/*
- * This is the maximum DMA address that can be DMAd to.
- * There should not be more than (0xd0000000 - 0xc0000000)
- * bytes of RAM.
- */
-#define ARM_DMA_ZONE_SIZE SZ_256M
-
#endif
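Instead of the compile-time ARM_DMA_ZONE_SIZE macro removed here, each machine record now carries a .dma_zone_size field (see the MACHINE_START blocks above), so one multi-board image can size ZONE_DMA per board at boot. A toy illustration of the idea (struct and sizes local to the example):

#include <stdio.h>

#define SZ_64M	0x04000000UL
#define SZ_256M	0x10000000UL

/* Cut-down stand-in for the machine descriptor: the DMA window is a
 * per-board field rather than a global build-time constant. */
struct machine_desc {
	const char *name;
	unsigned long dma_zone_size;	/* 0 means no DMA restriction */
};

static const struct machine_desc boards[] = {
	{ .name = "Hynix GMS30C7201", .dma_zone_size = SZ_256M },
	{ .name = "Intel IXDP425",    .dma_zone_size = SZ_64M  },
	{ .name = "no-restriction",   .dma_zone_size = 0       },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(boards) / sizeof(boards[0]); i++)
		printf("%-18s DMA zone %lu MiB\n", boards[i].name,
		       boards[i].dma_zone_size >> 20);
	return 0;
}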
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 59c97a331136..e8dd22fa7d61 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -167,6 +167,7 @@ config MACH_EUKREA_MBIMXSD25_BASEBOARD
bool "Eukrea MBIMXSD development board"
select IMX_HAVE_PLATFORM_GPIO_KEYS
select IMX_HAVE_PLATFORM_IMX_SSI
+ select LEDS_GPIO_REGISTER
help
This adds board specific devices that can be found on Eukrea's
MBIMXSD evaluation board.
@@ -265,6 +266,7 @@ config MACH_EUKREA_MBIMX27_BASEBOARD
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_MMC
select IMX_HAVE_PLATFORM_SPI_IMX
+ select LEDS_GPIO_REGISTER
help
This adds board specific devices that can be found on Eukrea's
MBIMX27 evaluation board.
@@ -403,6 +405,7 @@ config MACH_MX31LITE
select IMX_HAVE_PLATFORM_MXC_NAND
select IMX_HAVE_PLATFORM_MXC_RTC
select IMX_HAVE_PLATFORM_SPI_IMX
+ select LEDS_GPIO_REGISTER
help
Include support for MX31 LITEKIT platform. This includes specific
configurations for the board and its peripherals.
@@ -471,6 +474,7 @@ config MACH_MX31MOBOARD
select IMX_HAVE_PLATFORM_MXC_EHCI
select IMX_HAVE_PLATFORM_MXC_MMC
select IMX_HAVE_PLATFORM_SPI_IMX
+ select LEDS_GPIO_REGISTER
select MXC_ULPI if USB_ULPI
help
Include support for mx31moboard platform. This includes specific
@@ -577,6 +581,7 @@ config MACH_EUKREA_MBIMXSD35_BASEBOARD
select IMX_HAVE_PLATFORM_GPIO_KEYS
select IMX_HAVE_PLATFORM_IMX_SSI
select IMX_HAVE_PLATFORM_IPU_CORE
+ select LEDS_GPIO_REGISTER
help
This adds board specific devices that can be found on Eukrea's
MBIMXSD evaluation board.
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c
index f8aa5be0eb15..42afc29a7da8 100644
--- a/arch/arm/mach-imx/dma-v1.c
+++ b/arch/arm/mach-imx/dma-v1.c
@@ -476,7 +476,6 @@ void imx_dma_enable(int channel)
imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
CCR_ACRPT, DMA_CCR(channel));
-#ifdef CONFIG_ARCH_MX2
if ((cpu_is_mx21() || cpu_is_mx27()) &&
imxdma->sg && imx_dma_hw_chain(imxdma)) {
imxdma->sg = sg_next(imxdma->sg);
@@ -488,7 +487,6 @@ void imx_dma_enable(int channel)
DMA_CCR(channel));
}
}
-#endif
imxdma->in_use = 1;
local_irq_restore(flags);
@@ -519,7 +517,6 @@ void imx_dma_disable(int channel)
}
EXPORT_SYMBOL(imx_dma_disable);
-#ifdef CONFIG_ARCH_MX2
static void imx_dma_watchdog(unsigned long chno)
{
struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
@@ -531,7 +528,6 @@ static void imx_dma_watchdog(unsigned long chno)
if (imxdma->err_handler)
imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
}
-#endif
static irqreturn_t dma_err_handler(int irq, void *dev_id)
{
@@ -655,10 +651,8 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
int i, disr;
-#ifdef CONFIG_ARCH_MX2
if (cpu_is_mx21() || cpu_is_mx27())
dma_err_handler(irq, dev_id);
-#endif
disr = imx_dmav1_readl(DMA_DISR);
@@ -704,7 +698,6 @@ int imx_dma_request(int channel, const char *name)
imxdma->name = name;
local_irq_restore(flags); /* request_irq() can block */
-#ifdef CONFIG_ARCH_MX2
if (cpu_is_mx21() || cpu_is_mx27()) {
ret = request_irq(MX2x_INT_DMACH0 + channel,
dma_irq_handler, 0, "DMA", NULL);
@@ -718,7 +711,6 @@ int imx_dma_request(int channel, const char *name)
imxdma->watchdog.function = &imx_dma_watchdog;
imxdma->watchdog.data = channel;
}
-#endif
return ret;
}
@@ -745,10 +737,8 @@ void imx_dma_free(int channel)
imx_dma_disable(channel);
imxdma->name = NULL;
-#ifdef CONFIG_ARCH_MX2
if (cpu_is_mx21() || cpu_is_mx27())
free_irq(MX2x_INT_DMACH0 + channel, NULL);
-#endif
local_irq_restore(flags);
}
@@ -804,21 +794,13 @@ static int __init imx_dma_init(void)
int ret = 0;
int i;
-#ifdef CONFIG_ARCH_MX1
if (cpu_is_mx1())
imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
- else
-#endif
-#ifdef CONFIG_MACH_MX21
- if (cpu_is_mx21())
+ else if (cpu_is_mx21())
imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
- else
-#endif
-#ifdef CONFIG_MACH_MX27
- if (cpu_is_mx27())
+ else if (cpu_is_mx27())
imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
else
-#endif
return 0;
dma_clk = clk_get(NULL, "dma");
@@ -829,7 +811,6 @@ static int __init imx_dma_init(void)
/* reset DMA module */
imx_dmav1_writel(DCR_DRST, DMA_DCR);
-#ifdef CONFIG_ARCH_MX1
if (cpu_is_mx1()) {
ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL);
if (ret) {
@@ -844,7 +825,7 @@ static int __init imx_dma_init(void)
return ret;
}
}
-#endif
+
/* enable DMA module */
imx_dmav1_writel(DCR_DEN, DMA_DCR);
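The #ifdef blocks in dma-v1.c become runtime cpu_is_mx1()/cpu_is_mx21()/cpu_is_mx27() checks so one binary can drive all three SoCs; in a single-SoC build the helpers collapse to constants and the compiler still drops the dead branches. A small illustration of that pattern (the config symbol and local macro definitions are made up for the example):

#include <stdio.h>

#ifdef CONFIG_SOC_IMX27_ONLY		/* hypothetical single-SoC build */
# define cpu_is_mx21()	0
# define cpu_is_mx27()	1
#else
static int boot_cpu_type = 27;		/* discovered at boot on a multi-SoC kernel */
# define cpu_is_mx21()	(boot_cpu_type == 21)
# define cpu_is_mx27()	(boot_cpu_type == 27)
#endif

int main(void)
{
	if (cpu_is_mx21() || cpu_is_mx27())
		printf("mx2x: register the per-channel DMA error IRQ\n");
	else
		printf("mx1: single shared DMA IRQ\n");
	return 0;
}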
diff --git a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
index 5911281da5f5..5db3e1463af7 100644
--- a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
@@ -112,7 +112,7 @@ eukrea_mbimx27_keymap_data __initconst = {
.keymap_size = ARRAY_SIZE(eukrea_mbimx27_keymap),
};
-static struct gpio_led gpio_leds[] = {
+static const struct gpio_led eukrea_mbimx27_gpio_leds[] __initconst = {
{
.name = "led1",
.default_trigger = "heartbeat",
@@ -127,17 +127,10 @@ static struct gpio_led gpio_leds[] = {
},
};
-static struct gpio_led_platform_data gpio_led_info = {
- .leds = gpio_leds,
- .num_leds = ARRAY_SIZE(gpio_leds),
-};
-
-static struct platform_device leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &gpio_led_info,
- },
+static const struct gpio_led_platform_data
+ eukrea_mbimx27_gpio_led_info __initconst = {
+ .leds = eukrea_mbimx27_gpio_leds,
+ .num_leds = ARRAY_SIZE(eukrea_mbimx27_gpio_leds),
};
static struct imx_fb_videomode eukrea_mbimx27_modes[] = {
@@ -293,10 +286,6 @@ static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = {
},
};
-static struct platform_device *platform_devices[] __initdata = {
- &leds_gpio,
-};
-
static const struct imxmmc_platform_data sdhc_pdata __initconst = {
.dat3_card_detect = 1,
};
@@ -377,5 +366,5 @@ void __init eukrea_mbimx27_baseboard_init(void)
imx27_add_imx_keypad(&eukrea_mbimx27_keymap_data);
- platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+ gpio_led_register_device(-1, &eukrea_mbimx27_gpio_led_info);
}
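This and the following board files stop open-coding a static "leds-gpio" platform_device and instead pass __initconst tables to gpio_led_register_device(); the helper (made available via the LEDS_GPIO_REGISTER selects in the Kconfig hunk above) copies the data, so the tables can be discarded after init. A rough standalone model of the calling pattern (the real helper also deep-copies the LED array):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct gpio_led { const char *name; int gpio; };
struct gpio_led_platform_data { int num_leds; const struct gpio_led *leds; };

/* Rough model: duplicate the caller's init-only table and register the
 * "leds-gpio" device from the copy. */
static void toy_gpio_led_register_device(int id,
		const struct gpio_led_platform_data *pdata)
{
	struct gpio_led_platform_data *copy = malloc(sizeof(*copy));

	if (!copy)
		return;
	memcpy(copy, pdata, sizeof(*copy));
	printf("leds-gpio.%d registered with %d LED(s)\n", id, copy->num_leds);
	free(copy);
}

int main(void)
{
	static const struct gpio_led leds[] = { { "led1", 53 }, { "led2", 54 } };
	static const struct gpio_led_platform_data info = {
		.num_leds = 2, .leds = leds,
	};

	toy_gpio_led_register_device(-1, &info);
	return 0;
}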
diff --git a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
index f9ef04acdab1..01ebcb31e482 100644
--- a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c
@@ -173,7 +173,7 @@ static struct platform_device eukrea_mbimxsd_lcd_powerdev = {
.dev.platform_data = &eukrea_mbimxsd_lcd_power_data,
};
-static struct gpio_led eukrea_mbimxsd_leds[] = {
+static const struct gpio_led eukrea_mbimxsd_leds[] __initconst = {
{
.name = "led1",
.default_trigger = "heartbeat",
@@ -182,19 +182,12 @@ static struct gpio_led eukrea_mbimxsd_leds[] = {
},
};
-static struct gpio_led_platform_data eukrea_mbimxsd_led_info = {
+static const struct gpio_led_platform_data
+ eukrea_mbimxsd_led_info __initconst = {
.leds = eukrea_mbimxsd_leds,
.num_leds = ARRAY_SIZE(eukrea_mbimxsd_leds),
};
-static struct platform_device eukrea_mbimxsd_leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &eukrea_mbimxsd_led_info,
- },
-};
-
static struct gpio_keys_button eukrea_mbimxsd_gpio_buttons[] = {
{
.gpio = GPIO_SWITCH1,
@@ -212,7 +205,6 @@ static const struct gpio_keys_platform_data
};
static struct platform_device *platform_devices[] __initdata = {
- &eukrea_mbimxsd_leds_gpio,
&eukrea_mbimxsd_lcd_powerdev,
};
@@ -287,5 +279,6 @@ void __init eukrea_mbimxsd25_baseboard_init(void)
ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+ gpio_led_register_device(-1, &eukrea_mbimxsd_led_info);
imx_add_gpio_keys(&eukrea_mbimxsd_button_data);
}
diff --git a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
index 4909ea05855a..558eb526ba56 100644
--- a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
@@ -193,19 +193,12 @@ static struct gpio_led eukrea_mbimxsd_leds[] = {
},
};
-static struct gpio_led_platform_data eukrea_mbimxsd_led_info = {
+static const struct gpio_led_platform_data
+ eukrea_mbimxsd_led_info __initconst = {
.leds = eukrea_mbimxsd_leds,
.num_leds = ARRAY_SIZE(eukrea_mbimxsd_leds),
};
-static struct platform_device eukrea_mbimxsd_leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &eukrea_mbimxsd_led_info,
- },
-};
-
static struct gpio_keys_button eukrea_mbimxsd_gpio_buttons[] = {
{
.gpio = GPIO_SWITCH1,
@@ -223,7 +216,6 @@ static const struct gpio_keys_platform_data
};
static struct platform_device *platform_devices[] __initdata = {
- &eukrea_mbimxsd_leds_gpio,
&eukrea_mbimxsd_lcd_powerdev,
};
@@ -299,5 +291,6 @@ void __init eukrea_mbimxsd35_baseboard_init(void)
ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+ gpio_led_register_device(-1, &eukrea_mbimxsd_led_info);
imx_add_gpio_keys(&eukrea_mbimxsd_button_data);
}
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
index 59d2a3b137d9..a404c89485ca 100644
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ b/arch/arm/mach-imx/mach-apf9328.c
@@ -99,11 +99,6 @@ static struct platform_device dm9000x_device = {
}
};
-/* --- SERIAL RESSOURCE --- */
-static const struct imxuart_platform_data uart0_pdata __initconst = {
- .flags = 0,
-};
-
static const struct imxuart_platform_data uart1_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -121,7 +116,7 @@ static void __init apf9328_init(void)
ARRAY_SIZE(apf9328_pins),
"APF9328");
- imx1_add_imx_uart0(&uart0_pdata);
+ imx1_add_imx_uart0(NULL);
imx1_add_imx_uart1(&uart1_pdata);
platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index c6269d60ddbc..6707de0ab716 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -34,7 +34,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <mach/common.h>
-#include <mach/iomux.h>
+#include <mach/iomux-mx27.h>
#include "devices-imx27.h"
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 117ce0a50f4e..b31d4129e10e 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -42,10 +42,12 @@
#include "devices-imx27.h"
-#define SD1_EN_GPIO (GPIO_PORTB + 25)
-#define OTG_PHY_RESET_GPIO (GPIO_PORTB + 23)
-#define SPI2_SS0 (GPIO_PORTD + 21)
-#define EXPIO_PARENT_INT (MXC_INTERNAL_IRQS + GPIO_PORTC + 28)
+#define SD1_EN_GPIO IMX_GPIO_NR(2, 25)
+#define OTG_PHY_RESET_GPIO IMX_GPIO_NR(2, 23)
+#define SPI2_SS0 IMX_GPIO_NR(4, 21)
+#define EXPIO_PARENT_INT gpio_to_irq(IMX_GPIO_NR(3, 28))
+#define PMIC_INT IMX_GPIO_NR(3, 14)
+#define SD1_CD IMX_GPIO_NR(2, 26)
static const int mx27pdk_pins[] __initconst = {
/* UART1 */
@@ -98,9 +100,12 @@ static const int mx27pdk_pins[] __initconst = {
PD22_PF_CSPI2_SCLK,
PD23_PF_CSPI2_MISO,
PD24_PF_CSPI2_MOSI,
+ SPI2_SS0 | GPIO_GPIO | GPIO_OUT,
/* I2C1 */
PD17_PF_I2C_DATA,
PD18_PF_I2C_CLK,
+ /* PMIC INT */
+ PMIC_INT | GPIO_GPIO | GPIO_IN,
};
static const struct imxuart_platform_data uart_pdata __initconst = {
@@ -131,13 +136,13 @@ static const struct matrix_keymap_data mx27_3ds_keymap_data __initconst = {
static int mx27_3ds_sdhc1_init(struct device *dev, irq_handler_t detect_irq,
void *data)
{
- return request_irq(IRQ_GPIOB(26), detect_irq, IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING, "sdhc1-card-detect", data);
+ return request_irq(gpio_to_irq(SD1_CD), detect_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, "sdhc1-card-detect", data);
}
static void mx27_3ds_sdhc1_exit(struct device *dev, void *data)
{
- free_irq(IRQ_GPIOB(26), data);
+ free_irq(gpio_to_irq(SD1_CD), data);
}
static const struct imxmmc_platform_data sdhc1_pdata __initconst = {
@@ -193,6 +198,13 @@ static int __init mx27_3ds_otg_mode(char *options)
__setup("otg_mode=", mx27_3ds_otg_mode);
/* Regulators */
+static struct regulator_init_data gpo_init = {
+ .constraints = {
+ .boot_on = 1,
+ .always_on = 1,
+ }
+};
+
static struct regulator_consumer_supply vmmc1_consumers[] = {
REGULATOR_SUPPLY("lcd_2v8", NULL),
};
@@ -201,7 +213,9 @@ static struct regulator_init_data vmmc1_init = {
.constraints = {
.min_uV = 2800000,
.max_uV = 2800000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(vmmc1_consumers),
.consumer_supplies = vmmc1_consumers,
@@ -228,6 +242,12 @@ static struct mc13xxx_regulator_init_data mx27_3ds_regulators[] = {
}, {
.id = MC13783_REG_VGEN,
.init_data = &vgen_init,
+ }, {
+ .id = MC13783_REG_GPO1, /* Turn on 1.8V */
+ .init_data = &gpo_init,
+ }, {
+ .id = MC13783_REG_GPO3, /* Turn on 3.3V */
+ .init_data = &gpo_init,
},
};
@@ -242,11 +262,11 @@ static struct mc13xxx_platform_data mc13783_pdata = {
};
/* SPI */
-static int spi2_internal_chipselect[] = {SPI2_SS0};
+static int spi2_chipselect[] = {SPI2_SS0};
static const struct spi_imx_master spi2_pdata __initconst = {
- .chipselect = spi2_internal_chipselect,
- .num_chipselect = ARRAY_SIZE(spi2_internal_chipselect),
+ .chipselect = spi2_chipselect,
+ .num_chipselect = ARRAY_SIZE(spi2_chipselect),
};
static struct spi_board_info mx27_3ds_spi_devs[] __initdata = {
@@ -256,7 +276,7 @@ static struct spi_board_info mx27_3ds_spi_devs[] __initdata = {
.bus_num = 1,
.chip_select = 0, /* SS0 */
.platform_data = &mc13783_pdata,
- .irq = IRQ_GPIOC(14),
+ .irq = gpio_to_irq(PMIC_INT),
.mode = SPI_CS_HIGH,
},
};
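The board file drops the hard-coded (GPIO_PORTx + n) arithmetic and IRQ_GPIOx() macros in favour of IMX_GPIO_NR(bank, pin) and gpio_to_irq(), keeping pad numbering and IRQ mapping out of board code. A small sketch of what those helpers compute (the IRQ base below is made up for the example; only the 32-pins-per-bank layout matches i.MX):

#include <stdio.h>

#define IMX_GPIO_NR(bank, nr)	(((bank) - 1) * 32 + (nr))
#define EXAMPLE_GPIO_IRQ_BASE	160			/* illustrative only */
#define gpio_to_irq(gpio)	(EXAMPLE_GPIO_IRQ_BASE + (gpio))

int main(void)
{
	int sd1_cd = IMX_GPIO_NR(2, 26);	/* port B, pin 26 */

	printf("SD1_CD is gpio %d, irq %d\n", sd1_cd, gpio_to_irq(sd1_cd));
	return 0;
}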
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index 441fbb83f39c..c20be7530927 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -54,11 +54,8 @@ static int mx31_3ds_pins[] = {
MX31_PIN_RXD1__RXD1,
IOMUX_MODE(MX31_PIN_GPIO1_1, IOMUX_CONFIG_GPIO),
/*SPI0*/
- MX31_PIN_CSPI1_SCLK__SCLK,
- MX31_PIN_CSPI1_MOSI__MOSI,
- MX31_PIN_CSPI1_MISO__MISO,
- MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
- MX31_PIN_CSPI1_SS2__SS2, /* CS for LCD */
+ IOMUX_MODE(MX31_PIN_DSR_DCE1, IOMUX_CONFIG_ALT1),
+ IOMUX_MODE(MX31_PIN_RI_DCE1, IOMUX_CONFIG_ALT1),
/* SPI 1 */
MX31_PIN_CSPI2_SCLK__SCLK,
MX31_PIN_CSPI2_MOSI__MOSI,
@@ -692,6 +689,9 @@ static void __init mx31_3ds_init(void)
imx31_soc_init();
+ /* Configure SPI1 IOMUX */
+ mxc_iomux_set_gpr(MUX_PGP_CSPI_BB, true);
+
mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins),
"mx31_3ds");
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index a52fd36e2b52..b358383120e7 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -425,7 +425,7 @@ static int __init moboard_usbh2_init(void)
return 0;
}
-static struct gpio_led mx31moboard_leds[] = {
+static const struct gpio_led mx31moboard_leds[] __initconst = {
{
.name = "coreboard-led-0:red:running",
.default_trigger = "heartbeat",
@@ -442,26 +442,17 @@ static struct gpio_led mx31moboard_leds[] = {
},
};
-static struct gpio_led_platform_data mx31moboard_led_pdata = {
+static const struct gpio_led_platform_data mx31moboard_led_pdata __initconst = {
.num_leds = ARRAY_SIZE(mx31moboard_leds),
.leds = mx31moboard_leds,
};
-static struct platform_device mx31moboard_leds_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &mx31moboard_led_pdata,
- },
-};
-
static const struct ipu_platform_data mx3_ipu_data __initconst = {
.irq_base = MXC_IPU_IRQ_START,
};
static struct platform_device *devices[] __initdata = {
&mx31moboard_flash,
- &mx31moboard_leds_device,
};
static struct mx3_camera_pdata camera_pdata __initdata = {
@@ -513,6 +504,7 @@ static void __init mx31moboard_init(void)
"moboard");
platform_add_devices(devices, ARRAY_SIZE(devices));
+ gpio_led_register_device(-1, &mx31moboard_led_pdata);
imx31_add_imx_uart0(&uart0_pdata);
imx31_add_imx_uart4(&uart4_pdata);
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 48b3c6fd5cf0..b3b9bd8ac2a3 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -43,7 +43,7 @@
#include "devices-imx35.h"
-#define EXPIO_PARENT_INT (MXC_INTERNAL_IRQS + GPIO_PORTA + 1)
+#define EXPIO_PARENT_INT gpio_to_irq(IMX_GPIO_NR(1, 1))
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index 82805260e19c..db2d60470e15 100644
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -101,21 +101,7 @@ static const int mxc_uart1_pins[] = {
PC12_PF_UART1_RXD,
};
-static int uart1_mxc_init(struct platform_device *pdev)
-{
- return mxc_gpio_setup_multiple_pins(mxc_uart1_pins,
- ARRAY_SIZE(mxc_uart1_pins), "UART1");
-}
-
-static void uart1_mxc_exit(struct platform_device *pdev)
-{
- mxc_gpio_release_multiple_pins(mxc_uart1_pins,
- ARRAY_SIZE(mxc_uart1_pins));
-}
-
static const struct imxuart_platform_data uart_pdata __initconst = {
- .init = uart1_mxc_init,
- .exit = uart1_mxc_exit,
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -131,6 +117,9 @@ static void __init scb9328_init(void)
{
imx1_soc_init();
+ mxc_gpio_setup_multiple_pins(mxc_uart1_pins,
+ ARRAY_SIZE(mxc_uart1_pins), "UART1");
+
imx1_add_imx_uart0(&uart_pdata);
printk(KERN_INFO"Scb9328: Adding devices\n");
diff --git a/arch/arm/mach-imx/mx31lite-db.c b/arch/arm/mach-imx/mx31lite-db.c
index 5aa053edc17c..bf0fb87946ba 100644
--- a/arch/arm/mach-imx/mx31lite-db.c
+++ b/arch/arm/mach-imx/mx31lite-db.c
@@ -161,7 +161,7 @@ static const struct spi_imx_master spi0_pdata __initconst = {
/* GPIO LEDs */
-static struct gpio_led litekit_leds[] = {
+static const struct gpio_led litekit_leds[] __initconst = {
{
.name = "GPIO0",
.gpio = IOMUX_TO_GPIO(MX31_PIN_COMPARE),
@@ -176,19 +176,12 @@ static struct gpio_led litekit_leds[] = {
}
};
-static struct gpio_led_platform_data litekit_led_platform_data = {
+static const struct gpio_led_platform_data
+ litekit_led_platform_data __initconst = {
.leds = litekit_leds,
.num_leds = ARRAY_SIZE(litekit_leds),
};
-static struct platform_device litekit_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &litekit_led_platform_data,
- },
-};
-
void __init mx31lite_db_init(void)
{
mxc_iomux_setup_multiple_pins(litekit_db_board_pins,
@@ -197,7 +190,7 @@ void __init mx31lite_db_init(void)
imx31_add_imx_uart0(&uart_pdata);
imx31_add_mxc_mmc(0, &mmc_pdata);
imx31_add_spi_imx0(&spi0_pdata);
- platform_device_register(&litekit_led_device);
+ gpio_led_register_device(-1, &litekit_led_platform_data);
imx31_add_imx2_wdt(NULL);
imx31_add_mxc_rtc(NULL);
}
diff --git a/arch/arm/mach-integrator/include/mach/bits.h b/arch/arm/mach-integrator/include/mach/bits.h
deleted file mode 100644
index 09b024e0496a..000000000000
--- a/arch/arm/mach-integrator/include/mach/bits.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-/* DO NOT EDIT!! - this file automatically generated
- * from .s file by awk -f s2h.awk
- */
-/* Bit field definitions
- * Copyright (C) ARM Limited 1998. All rights reserved.
- */
-
-#ifndef __bits_h
-#define __bits_h 1
-
-#define BIT0 0x00000001
-#define BIT1 0x00000002
-#define BIT2 0x00000004
-#define BIT3 0x00000008
-#define BIT4 0x00000010
-#define BIT5 0x00000020
-#define BIT6 0x00000040
-#define BIT7 0x00000080
-#define BIT8 0x00000100
-#define BIT9 0x00000200
-#define BIT10 0x00000400
-#define BIT11 0x00000800
-#define BIT12 0x00001000
-#define BIT13 0x00002000
-#define BIT14 0x00004000
-#define BIT15 0x00008000
-#define BIT16 0x00010000
-#define BIT17 0x00020000
-#define BIT18 0x00040000
-#define BIT19 0x00080000
-#define BIT20 0x00100000
-#define BIT21 0x00200000
-#define BIT22 0x00400000
-#define BIT23 0x00800000
-#define BIT24 0x01000000
-#define BIT25 0x02000000
-#define BIT26 0x04000000
-#define BIT27 0x08000000
-#define BIT28 0x10000000
-#define BIT29 0x20000000
-#define BIT30 0x40000000
-#define BIT31 0x80000000
-
-#endif
-
-/* END */
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 73745ff102d5..ee19c1d383aa 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -169,6 +169,9 @@ MACHINE_START(AVILA, "Gateworks Avila Network Platform")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = avila_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
/*
@@ -184,6 +187,9 @@ MACHINE_START(LOFT, "Giant Shoulder Inc Loft board")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = avila_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index e9a589395723..e2e98bbb6413 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r
}
+static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+ return (dma_addr + size) >= SZ_64M;
+}
+
/*
* Setup DMA mask to 64MB on PCI devices. Ignore all other devices.
*/
@@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
if(dev->bus == &pci_bus_type) {
*dev->dma_mask = SZ_64M - 1;
dev->coherent_dma_mask = SZ_64M - 1;
- dmabounce_register_dev(dev, 2048, 4096);
+ dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
}
return 0;
}
@@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev)
return 0;
}
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
- return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
-}
-
void __init ixp4xx_pci_preinit(void)
{
unsigned long cpuid = read_cpuid_id();
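dmabounce_register_dev() now takes a per-device needs_bounce callback, replacing the single global dma_needs_bounce() hook removed above, so the 64 MB PCI window test is attached only to the devices that need it. A standalone sketch of the same test:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long long dma_addr_t;
#define SZ_64M	0x04000000ULL

/* Same check as ixp4xx_needs_bounce() above: any buffer that would end
 * past the 64 MB PCI inbound window must go through a bounce buffer. */
static int needs_bounce(dma_addr_t dma_addr, size_t size)
{
	return (dma_addr + size) >= SZ_64M;
}

int main(void)
{
	printf("buffer at 0x3FFF000 + 8K: %s\n",
	       needs_bounce(0x3FFF000, 8192) ? "bounce" : "direct");
	printf("buffer at 0x1000000 + 4K: %s\n",
	       needs_bounce(0x1000000, 4096) ? "bounce" : "direct");
	return 0;
}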
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index 355e3de38733..e24564b5d935 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -114,6 +114,9 @@ MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = coyote_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index d398229cfaa5..03e54515e8b3 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -284,4 +284,7 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA")
.init_irq = ixp4xx_init_irq,
.timer = &dsmg600_timer,
.init_machine = dsmg600_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 727ee39ce11c..23a8b3614568 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -275,5 +275,8 @@ MACHINE_START(FSG, "Freecom FSG-3")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = fsg_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c
index 9dc0b4eaa65a..d4f851bdd9a4 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-setup.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c
@@ -101,5 +101,8 @@ MACHINE_START(GATEWAY7001, "Gateway 7001 AP")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = gateway7001_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index 3e8c0e33b59c..5f00ad224fe0 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -501,4 +501,7 @@ MACHINE_START(GORAMO_MLR, "MultiLink")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = gmlr_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index 77abead36227..3790dffd3c30 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -169,6 +169,9 @@ MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = gtwx5715_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h
index 34e79404671a..4caf1761f1e2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
@@ -14,8 +14,4 @@
*/
#define PLAT_PHYS_OFFSET UL(0x00000000)
-#ifdef CONFIG_PCI
-#define ARM_DMA_ZONE_SIZE SZ_64M
-#endif
-
#endif
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index dca4f7f9f4f7..6a2927956bf6 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -258,6 +258,9 @@ MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = ixdp425_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
#endif
@@ -269,6 +272,9 @@ MACHINE_START(IXDP465, "Intel IXDP465 Development Platform")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = ixdp425_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
#endif
@@ -280,6 +286,9 @@ MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = ixdp425_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
#endif
@@ -291,5 +300,8 @@ MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = ixdp425_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
#endif
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index f18fee748878..afb51879d9a4 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -319,4 +319,7 @@ MACHINE_START(NAS100D, "Iomega NAS 100d")
.init_irq = ixp4xx_init_irq,
.timer = &ixp4xx_timer,
.init_machine = nas100d_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index f79b62eb7614..69e40f2cf092 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -305,4 +305,7 @@ MACHINE_START(NSLU2, "Linksys NSLU2")
.init_irq = ixp4xx_init_irq,
.timer = &nslu2_timer,
.init_machine = nslu2_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index 4e72cfdd3c46..045336c833af 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -241,4 +241,7 @@ MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = vulcan_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c
index 5d148c7bc4fb..40b9fad800b8 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-setup.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c
@@ -102,5 +102,8 @@ MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2")
.timer = &ixp4xx_timer,
.boot_params = 0x0100,
.init_machine = wg302v2_init,
+#if defined(CONFIG_PCI)
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
#endif
diff --git a/arch/arm/mach-loki/Kconfig b/arch/arm/mach-loki/Kconfig
deleted file mode 100644
index 0045bdd761ca..000000000000
--- a/arch/arm/mach-loki/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-if ARCH_LOKI
-
-menu "Marvell Loki (88RC8480) Implementations"
-
-config MACH_LB88RC8480
- bool "Marvell LB88RC8480 Development Board"
- help
- Say 'Y' here if you want your kernel to support the
- Marvell LB88RC8480 Development Board.
-
-endmenu
-
-endif
diff --git a/arch/arm/mach-loki/Makefile b/arch/arm/mach-loki/Makefile
deleted file mode 100644
index d43233ee590f..000000000000
--- a/arch/arm/mach-loki/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-y += common.o addr-map.o irq.o
-
-obj-$(CONFIG_MACH_LB88RC8480) += lb88rc8480-setup.o
diff --git a/arch/arm/mach-loki/Makefile.boot b/arch/arm/mach-loki/Makefile.boot
deleted file mode 100644
index 67039c3e0c48..000000000000
--- a/arch/arm/mach-loki/Makefile.boot
+++ /dev/null
@@ -1,3 +0,0 @@
- zreladdr-y := 0x00008000
-params_phys-y := 0x00000100
-initrd_phys-y := 0x00800000
diff --git a/arch/arm/mach-loki/addr-map.c b/arch/arm/mach-loki/addr-map.c
deleted file mode 100644
index b9537c97beba..000000000000
--- a/arch/arm/mach-loki/addr-map.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * arch/arm/mach-loki/addr-map.c
- *
- * Address map functions for Marvell Loki (88RC8480) SoCs
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/mbus.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include "common.h"
-
-/*
- * Generic Address Decode Windows bit settings
- */
-#define TARGET_DDR 0
-#define TARGET_DEV_BUS 1
-#define TARGET_PCIE0 3
-#define TARGET_PCIE1 4
-#define ATTR_DEV_BOOT 0x0f
-#define ATTR_DEV_CS2 0x1b
-#define ATTR_DEV_CS1 0x1d
-#define ATTR_DEV_CS0 0x1e
-#define ATTR_PCIE_IO 0x51
-#define ATTR_PCIE_MEM 0x59
-
-/*
- * Helpers to get DDR bank info
- */
-#define DDR_SIZE_CS(n) DDR_REG(0x1500 + ((n) << 3))
-#define DDR_BASE_CS(n) DDR_REG(0x1504 + ((n) << 3))
-
-/*
- * CPU Address Decode Windows registers
- */
-#define BRIDGE_REG(x) (BRIDGE_VIRT_BASE | (x))
-#define CPU_WIN_CTRL(n) BRIDGE_REG(0x000 | ((n) << 4))
-#define CPU_WIN_BASE(n) BRIDGE_REG(0x004 | ((n) << 4))
-#define CPU_WIN_REMAP_LO(n) BRIDGE_REG(0x008 | ((n) << 4))
-#define CPU_WIN_REMAP_HI(n) BRIDGE_REG(0x00c | ((n) << 4))
-
-
-struct mbus_dram_target_info loki_mbus_dram_info;
-
-static void __init setup_cpu_win(int win, u32 base, u32 size,
- u8 target, u8 attr, int remap)
-{
- u32 ctrl;
-
- base &= 0xffff0000;
- ctrl = ((size - 1) & 0xffff0000) | (attr << 8) | (1 << 5) | target;
-
- writel(base, CPU_WIN_BASE(win));
- writel(ctrl, CPU_WIN_CTRL(win));
- if (win < 2) {
- if (remap < 0)
- remap = base;
-
- writel(remap & 0xffff0000, CPU_WIN_REMAP_LO(win));
- writel(0, CPU_WIN_REMAP_HI(win));
- }
-}
-
-void __init loki_setup_cpu_mbus(void)
-{
- int i;
- int cs;
-
- /*
- * First, disable and clear windows.
- */
- for (i = 0; i < 8; i++) {
- writel(0, CPU_WIN_BASE(i));
- writel(0, CPU_WIN_CTRL(i));
- if (i < 2) {
- writel(0, CPU_WIN_REMAP_LO(i));
- writel(0, CPU_WIN_REMAP_HI(i));
- }
- }
-
- /*
- * Setup windows for PCIe IO+MEM space.
- */
- setup_cpu_win(2, LOKI_PCIE0_MEM_PHYS_BASE, LOKI_PCIE0_MEM_SIZE,
- TARGET_PCIE0, ATTR_PCIE_MEM, -1);
- setup_cpu_win(3, LOKI_PCIE1_MEM_PHYS_BASE, LOKI_PCIE1_MEM_SIZE,
- TARGET_PCIE1, ATTR_PCIE_MEM, -1);
-
- /*
- * Setup MBUS dram target info.
- */
- loki_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
-
- for (i = 0, cs = 0; i < 4; i++) {
- u32 base = readl(DDR_BASE_CS(i));
- u32 size = readl(DDR_SIZE_CS(i));
-
- /*
- * Chip select enabled?
- */
- if (size & 1) {
- struct mbus_dram_window *w;
-
- w = &loki_mbus_dram_info.cs[cs++];
- w->cs_index = i;
- w->mbus_attr = 0xf & ~(1 << i);
- w->base = base & 0xffff0000;
- w->size = (size | 0x0000ffff) + 1;
- }
- }
- loki_mbus_dram_info.num_cs = cs;
-}
-
-void __init loki_setup_dev_boot_win(u32 base, u32 size)
-{
- setup_cpu_win(4, base, size, TARGET_DEV_BUS, ATTR_DEV_BOOT, -1);
-}
diff --git a/arch/arm/mach-loki/common.c b/arch/arm/mach-loki/common.c
deleted file mode 100644
index 5f02664db812..000000000000
--- a/arch/arm/mach-loki/common.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * arch/arm/mach-loki/common.c
- *
- * Core functions for Marvell Loki (88RC8480) SoCs
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/serial_8250.h>
-#include <linux/mbus.h>
-#include <linux/dma-mapping.h>
-#include <asm/page.h>
-#include <asm/timex.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-#include <mach/bridge-regs.h>
-#include <mach/loki.h>
-#include <plat/orion_nand.h>
-#include <plat/time.h>
-#include <plat/common.h>
-#include "common.h"
-
-/*****************************************************************************
- * I/O Address Mapping
- ****************************************************************************/
-static struct map_desc loki_io_desc[] __initdata = {
- {
- .virtual = LOKI_REGS_VIRT_BASE,
- .pfn = __phys_to_pfn(LOKI_REGS_PHYS_BASE),
- .length = LOKI_REGS_SIZE,
- .type = MT_DEVICE,
- },
-};
-
-void __init loki_map_io(void)
-{
- iotable_init(loki_io_desc, ARRAY_SIZE(loki_io_desc));
-}
-
-
-/*****************************************************************************
- * GE00
- ****************************************************************************/
-void __init loki_ge0_init(struct mv643xx_eth_platform_data *eth_data)
-{
- writel(0x00079220, GE0_VIRT_BASE + 0x20b0);
-
- orion_ge00_init(eth_data, &loki_mbus_dram_info,
- GE0_PHYS_BASE, IRQ_LOKI_GBE_A_INT,
- 0, LOKI_TCLK);
-}
-
-
-/*****************************************************************************
- * GE01
- ****************************************************************************/
-void __init loki_ge1_init(struct mv643xx_eth_platform_data *eth_data)
-{
- writel(0x00079220, GE1_VIRT_BASE + 0x20b0);
-
- orion_ge01_init(eth_data, &loki_mbus_dram_info,
- GE1_PHYS_BASE, IRQ_LOKI_GBE_B_INT,
- 0, LOKI_TCLK);
-}
-
-
-/*****************************************************************************
- * SAS/SATA
- ****************************************************************************/
-static struct resource loki_sas_resources[] = {
- {
- .name = "mvsas0 mem",
- .start = SAS0_PHYS_BASE,
- .end = SAS0_PHYS_BASE + 0x01ff,
- .flags = IORESOURCE_MEM,
- }, {
- .name = "mvsas0 irq",
- .start = IRQ_LOKI_SAS_A,
- .end = IRQ_LOKI_SAS_A,
- .flags = IORESOURCE_IRQ,
- }, {
- .name = "mvsas1 mem",
- .start = SAS1_PHYS_BASE,
- .end = SAS1_PHYS_BASE + 0x01ff,
- .flags = IORESOURCE_MEM,
- }, {
- .name = "mvsas1 irq",
- .start = IRQ_LOKI_SAS_B,
- .end = IRQ_LOKI_SAS_B,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device loki_sas = {
- .name = "mvsas",
- .id = 0,
- .dev = {
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .num_resources = ARRAY_SIZE(loki_sas_resources),
- .resource = loki_sas_resources,
-};
-
-void __init loki_sas_init(void)
-{
- writel(0x8300f707, DDR_REG(0x1424));
- platform_device_register(&loki_sas);
-}
-
-
-/*****************************************************************************
- * UART0
- ****************************************************************************/
-void __init loki_uart0_init(void)
-{
- orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
- IRQ_LOKI_UART0, LOKI_TCLK);
-}
-
-/*****************************************************************************
- * UART1
- ****************************************************************************/
-void __init loki_uart1_init(void)
-{
- orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
- IRQ_LOKI_UART1, LOKI_TCLK);
-}
-
-
-/*****************************************************************************
- * Time handling
- ****************************************************************************/
-void __init loki_init_early(void)
-{
- orion_time_set_base(TIMER_VIRT_BASE);
-}
-
-static void loki_timer_init(void)
-{
- orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR,
- IRQ_LOKI_BRIDGE, LOKI_TCLK);
-}
-
-struct sys_timer loki_timer = {
- .init = loki_timer_init,
-};
-
-
-/*****************************************************************************
- * General
- ****************************************************************************/
-void __init loki_init(void)
-{
- printk(KERN_INFO "Loki ID: 88RC8480. TCLK=%d.\n", LOKI_TCLK);
-
- loki_setup_cpu_mbus();
-}
diff --git a/arch/arm/mach-loki/common.h b/arch/arm/mach-loki/common.h
deleted file mode 100644
index a315dcf8887c..000000000000
--- a/arch/arm/mach-loki/common.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * arch/arm/mach-loki/common.h
- *
- * Core functions for Marvell Loki (88RC8480) SoCs
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ARCH_LOKI_COMMON_H
-#define __ARCH_LOKI_COMMON_H
-
-struct mv643xx_eth_platform_data;
-
-/*
- * Basic Loki init functions used early by machine-setup.
- */
-void loki_map_io(void);
-void loki_init(void);
-void loki_init_early(void);
-void loki_init_irq(void);
-
-extern struct mbus_dram_target_info loki_mbus_dram_info;
-void loki_setup_cpu_mbus(void);
-void loki_setup_dev_boot_win(u32 base, u32 size);
-
-void loki_ge0_init(struct mv643xx_eth_platform_data *eth_data);
-void loki_ge1_init(struct mv643xx_eth_platform_data *eth_data);
-void loki_sas_init(void);
-void loki_uart0_init(void);
-void loki_uart1_init(void);
-
-extern struct sys_timer loki_timer;
-
-
-#endif
diff --git a/arch/arm/mach-loki/include/mach/bridge-regs.h b/arch/arm/mach-loki/include/mach/bridge-regs.h
deleted file mode 100644
index fd87732097cd..000000000000
--- a/arch/arm/mach-loki/include/mach/bridge-regs.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/bridge-regs.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_BRIDGE_REGS_H
-#define __ASM_ARCH_BRIDGE_REGS_H
-
-#include <mach/loki.h>
-
-#define RSTOUTn_MASK (BRIDGE_VIRT_BASE | 0x0108)
-#define SOFT_RESET_OUT_EN 0x00000004
-
-#define SYSTEM_SOFT_RESET (BRIDGE_VIRT_BASE | 0x010c)
-#define SOFT_RESET 0x00000001
-
-#define BRIDGE_INT_TIMER1_CLR 0x0004
-
-#define IRQ_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0200)
-#define IRQ_CAUSE_OFF 0x0000
-#define IRQ_MASK_OFF 0x0004
-
-#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
-
-#endif
diff --git a/arch/arm/mach-loki/include/mach/debug-macro.S b/arch/arm/mach-loki/include/mach/debug-macro.S
deleted file mode 100644
index cc90d99ac76c..000000000000
--- a/arch/arm/mach-loki/include/mach/debug-macro.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/loki.h>
-
- .macro addruart, rp, rv
- ldr \rp, =LOKI_REGS_PHYS_BASE
- ldr \rv, =LOKI_REGS_VIRT_BASE
- orr \rp, \rp, #0x00012000
- orr \rv, \rv, #0x00012000
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-loki/include/mach/entry-macro.S b/arch/arm/mach-loki/include/mach/entry-macro.S
deleted file mode 100644
index bc917ed3a62d..000000000000
--- a/arch/arm/mach-loki/include/mach/entry-macro.S
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/entry-macro.S
- *
- * Low-level IRQ helper macros for Marvell Loki (88RC8480) platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <mach/bridge-regs.h>
-
- .macro disable_fiq
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =IRQ_VIRT_BASE
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqstat, [\base, #IRQ_CAUSE_OFF]
- ldr \tmp, [\base, #IRQ_MASK_OFF]
- mov \irqnr, #0
- ands \irqstat, \irqstat, \tmp
- clzne \irqnr, \irqstat
- rsbne \irqnr, \irqnr, #31
- .endm
diff --git a/arch/arm/mach-loki/include/mach/hardware.h b/arch/arm/mach-loki/include/mach/hardware.h
deleted file mode 100644
index d7bfc8f17729..000000000000
--- a/arch/arm/mach-loki/include/mach/hardware.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/hardware.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include "loki.h"
-
-
-#endif
diff --git a/arch/arm/mach-loki/include/mach/io.h b/arch/arm/mach-loki/include/mach/io.h
deleted file mode 100644
index a373cd582c84..000000000000
--- a/arch/arm/mach-loki/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/io.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_IO_H
-#define __ASM_ARCH_IO_H
-
-#include "loki.h"
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-static inline void __iomem *__io(unsigned long addr)
-{
- return (void __iomem *)((addr - LOKI_PCIE0_IO_PHYS_BASE)
- + LOKI_PCIE0_IO_VIRT_BASE);
-}
-
-#define __io(a) __io(a)
-#define __mem_pci(a) (a)
-
-
-#endif
diff --git a/arch/arm/mach-loki/include/mach/irqs.h b/arch/arm/mach-loki/include/mach/irqs.h
deleted file mode 100644
index 9fbd3326867b..000000000000
--- a/arch/arm/mach-loki/include/mach/irqs.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/irqs.h
- *
- * IRQ definitions for Marvell Loki (88RC8480) SoCs
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_IRQS_H
-#define __ASM_ARCH_IRQS_H
-
-#include "loki.h" /* need GPIO_MAX */
-
-/*
- * Interrupt Controller
- */
-#define IRQ_LOKI_PCIE_A_CPU_DRBL 0
-#define IRQ_LOKI_CPU_PCIE_A_DRBL 1
-#define IRQ_LOKI_PCIE_B_CPU_DRBL 2
-#define IRQ_LOKI_CPU_PCIE_B_DRBL 3
-#define IRQ_LOKI_COM_A_ERR 6
-#define IRQ_LOKI_COM_A_IN 7
-#define IRQ_LOKI_COM_A_OUT 8
-#define IRQ_LOKI_COM_B_ERR 9
-#define IRQ_LOKI_COM_B_IN 10
-#define IRQ_LOKI_COM_B_OUT 11
-#define IRQ_LOKI_DMA_A 12
-#define IRQ_LOKI_DMA_B 13
-#define IRQ_LOKI_SAS_A 14
-#define IRQ_LOKI_SAS_B 15
-#define IRQ_LOKI_DDR 16
-#define IRQ_LOKI_XOR 17
-#define IRQ_LOKI_BRIDGE 18
-#define IRQ_LOKI_PCIE_A_ERR 20
-#define IRQ_LOKI_PCIE_A_INT 21
-#define IRQ_LOKI_PCIE_B_ERR 22
-#define IRQ_LOKI_PCIE_B_INT 23
-#define IRQ_LOKI_GBE_A_INT 24
-#define IRQ_LOKI_GBE_B_INT 25
-#define IRQ_LOKI_DEV_ERR 26
-#define IRQ_LOKI_UART0 27
-#define IRQ_LOKI_UART1 28
-#define IRQ_LOKI_TWSI 29
-#define IRQ_LOKI_GPIO_23_0 30
-#define IRQ_LOKI_GPIO_25_24 31
-
-/*
- * Loki General Purpose Pins
- */
-#define IRQ_LOKI_GPIO_START 32
-#define NR_GPIO_IRQS GPIO_MAX
-
-#define NR_IRQS (IRQ_LOKI_GPIO_START + NR_GPIO_IRQS)
-
-
-#endif
diff --git a/arch/arm/mach-loki/include/mach/loki.h b/arch/arm/mach-loki/include/mach/loki.h
deleted file mode 100644
index bfca7c265f43..000000000000
--- a/arch/arm/mach-loki/include/mach/loki.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/loki.h
- *
- * Generic definitions for Marvell Loki (88RC8480) SoC flavors
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_LOKI_H
-#define __ASM_ARCH_LOKI_H
-
-/*
- * Marvell Loki (88RC8480) address maps.
- *
- * phys
- * d0000000 on-chip peripheral registers
- * e0000000 PCIe 0 Memory space
- * e8000000 PCIe 1 Memory space
- * f0000000 PCIe 0 I/O space
- * f0100000 PCIe 1 I/O space
- *
- * virt phys size
- * fed00000 d0000000 1M on-chip peripheral registers
- * fee00000 f0000000 64K PCIe 0 I/O space
- * fef00000 f0100000 64K PCIe 1 I/O space
- */
-
-#define LOKI_REGS_PHYS_BASE 0xd0000000
-#define LOKI_REGS_VIRT_BASE 0xfed00000
-#define LOKI_REGS_SIZE SZ_1M
-
-#define LOKI_PCIE0_IO_PHYS_BASE 0xf0000000
-#define LOKI_PCIE0_IO_VIRT_BASE 0xfee00000
-#define LOKI_PCIE0_IO_BUS_BASE 0x00000000
-#define LOKI_PCIE0_IO_SIZE SZ_64K
-
-#define LOKI_PCIE1_IO_PHYS_BASE 0xf0100000
-#define LOKI_PCIE1_IO_VIRT_BASE 0xfef00000
-#define LOKI_PCIE1_IO_BUS_BASE 0x00000000
-#define LOKI_PCIE1_IO_SIZE SZ_64K
-
-#define LOKI_PCIE0_MEM_PHYS_BASE 0xe0000000
-#define LOKI_PCIE0_MEM_SIZE SZ_128M
-
-#define LOKI_PCIE1_MEM_PHYS_BASE 0xe8000000
-#define LOKI_PCIE1_MEM_SIZE SZ_128M
-
-/*
- * Register Map
- */
-#define DEV_BUS_PHYS_BASE (LOKI_REGS_PHYS_BASE | 0x10000)
-#define DEV_BUS_VIRT_BASE (LOKI_REGS_VIRT_BASE | 0x10000)
-#define UART0_PHYS_BASE (DEV_BUS_PHYS_BASE | 0x2000)
-#define UART0_VIRT_BASE (DEV_BUS_VIRT_BASE | 0x2000)
-#define UART1_PHYS_BASE (DEV_BUS_PHYS_BASE | 0x2100)
-#define UART1_VIRT_BASE (DEV_BUS_VIRT_BASE | 0x2100)
-
-#define BRIDGE_VIRT_BASE (LOKI_REGS_VIRT_BASE | 0x20000)
-
-#define PCIE0_VIRT_BASE (LOKI_REGS_VIRT_BASE | 0x30000)
-
-#define PCIE1_VIRT_BASE (LOKI_REGS_VIRT_BASE | 0x40000)
-
-#define SAS0_PHYS_BASE (LOKI_REGS_PHYS_BASE | 0x80000)
-
-#define SAS1_PHYS_BASE (LOKI_REGS_PHYS_BASE | 0x90000)
-
-#define GE0_PHYS_BASE (LOKI_REGS_PHYS_BASE | 0xa0000)
-#define GE0_VIRT_BASE (LOKI_REGS_VIRT_BASE | 0xa0000)
-
-#define GE1_PHYS_BASE (LOKI_REGS_PHYS_BASE | 0xb0000)
-#define GE1_VIRT_BASE (LOKI_REGS_VIRT_BASE | 0xb0000)
-
-#define DDR_VIRT_BASE (LOKI_REGS_VIRT_BASE | 0xf0000)
-#define DDR_REG(x) (DDR_VIRT_BASE | (x))
-
-
-#define GPIO_MAX 8
-
-
-#endif
diff --git a/arch/arm/mach-loki/include/mach/memory.h b/arch/arm/mach-loki/include/mach/memory.h
deleted file mode 100644
index 66366657a875..000000000000
--- a/arch/arm/mach-loki/include/mach/memory.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/memory.h
- */
-
-#ifndef __ASM_ARCH_MEMORY_H
-#define __ASM_ARCH_MEMORY_H
-
-#define PLAT_PHYS_OFFSET UL(0x00000000)
-
-#endif
diff --git a/arch/arm/mach-loki/include/mach/system.h b/arch/arm/mach-loki/include/mach/system.h
deleted file mode 100644
index 71895199a534..000000000000
--- a/arch/arm/mach-loki/include/mach/system.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/system.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_SYSTEM_H
-#define __ASM_ARCH_SYSTEM_H
-
-#include <mach/bridge-regs.h>
-
-static inline void arch_idle(void)
-{
- cpu_do_idle();
-}
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- /*
- * Enable soft reset to assert RSTOUTn.
- */
- writel(SOFT_RESET_OUT_EN, RSTOUTn_MASK);
-
- /*
- * Assert soft reset.
- */
- writel(SOFT_RESET, SYSTEM_SOFT_RESET);
-
- while (1)
- ;
-}
-
-
-#endif
diff --git a/arch/arm/mach-loki/include/mach/timex.h b/arch/arm/mach-loki/include/mach/timex.h
deleted file mode 100644
index 9df210915297..000000000000
--- a/arch/arm/mach-loki/include/mach/timex.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/timex.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#define CLOCK_TICK_RATE (100 * HZ)
-
-#define LOKI_TCLK 180000000
diff --git a/arch/arm/mach-loki/include/mach/uncompress.h b/arch/arm/mach-loki/include/mach/uncompress.h
deleted file mode 100644
index 90b2a7e65da3..000000000000
--- a/arch/arm/mach-loki/include/mach/uncompress.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/uncompress.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/serial_reg.h>
-#include <mach/loki.h>
-
-#define SERIAL_BASE ((unsigned char *)UART0_PHYS_BASE)
-
-static void putc(const char c)
-{
- unsigned char *base = SERIAL_BASE;
- int i;
-
- for (i = 0; i < 0x1000; i++) {
- if (base[UART_LSR << 2] & UART_LSR_THRE)
- break;
- barrier();
- }
-
- base[UART_TX << 2] = c;
-}
-
-static void flush(void)
-{
- unsigned char *base = SERIAL_BASE;
- unsigned char mask;
- int i;
-
- mask = UART_LSR_TEMT | UART_LSR_THRE;
-
- for (i = 0; i < 0x1000; i++) {
- if ((base[UART_LSR << 2] & mask) == mask)
- break;
- barrier();
- }
-}
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
-#define arch_decomp_wdog()
diff --git a/arch/arm/mach-loki/include/mach/vmalloc.h b/arch/arm/mach-loki/include/mach/vmalloc.h
deleted file mode 100644
index 5dcbd865443f..000000000000
--- a/arch/arm/mach-loki/include/mach/vmalloc.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
- * arch/arm/mach-loki/include/mach/vmalloc.h
- */
-
-#define VMALLOC_END 0xfe800000UL
diff --git a/arch/arm/mach-loki/irq.c b/arch/arm/mach-loki/irq.c
deleted file mode 100644
index 76b211bfcca2..000000000000
--- a/arch/arm/mach-loki/irq.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/mach-loki/irq.c
- *
- * Marvell Loki (88RC8480) IRQ handling.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <mach/bridge-regs.h>
-#include <plat/irq.h>
-#include "common.h"
-
-void __init loki_init_irq(void)
-{
- orion_irq_init(0, (void __iomem *)(IRQ_VIRT_BASE + IRQ_MASK_OFF));
-}
diff --git a/arch/arm/mach-loki/lb88rc8480-setup.c b/arch/arm/mach-loki/lb88rc8480-setup.c
deleted file mode 100644
index 35eae4e6abb2..000000000000
--- a/arch/arm/mach-loki/lb88rc8480-setup.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * arch/arm/mach-loki/lb88rc8480-setup.c
- *
- * Marvell LB88RC8480 Development Board Setup
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/nand.h>
-#include <linux/timer.h>
-#include <linux/ata_platform.h>
-#include <linux/mv643xx_eth.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <mach/loki.h>
-#include "common.h"
-
-#define LB88RC8480_FLASH_BOOT_CS_BASE 0xf8000000
-#define LB88RC8480_FLASH_BOOT_CS_SIZE SZ_128M
-
-#define LB88RC8480_NOR_BOOT_BASE 0xff000000
-#define LB88RC8480_NOR_BOOT_SIZE SZ_16M
-
-static struct mtd_partition lb88rc8480_boot_flash_parts[] = {
- {
- .name = "kernel",
- .offset = 0,
- .size = SZ_2M,
- }, {
- .name = "root-fs",
- .offset = SZ_2M,
- .size = (SZ_8M + SZ_4M + SZ_1M),
- }, {
- .name = "u-boot",
- .offset = (SZ_8M + SZ_4M + SZ_2M + SZ_1M),
- .size = SZ_1M,
- },
-};
-
-static struct physmap_flash_data lb88rc8480_boot_flash_data = {
- .parts = lb88rc8480_boot_flash_parts,
- .nr_parts = ARRAY_SIZE(lb88rc8480_boot_flash_parts),
- .width = 1, /* 8 bit bus width */
-};
-
-static struct resource lb88rc8480_boot_flash_resource = {
- .flags = IORESOURCE_MEM,
- .start = LB88RC8480_NOR_BOOT_BASE,
- .end = LB88RC8480_NOR_BOOT_BASE + LB88RC8480_NOR_BOOT_SIZE - 1,
-};
-
-static struct platform_device lb88rc8480_boot_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &lb88rc8480_boot_flash_data,
- },
- .num_resources = 1,
- .resource = &lb88rc8480_boot_flash_resource,
-};
-
-static struct mv643xx_eth_platform_data lb88rc8480_ge0_data = {
- .phy_addr = MV643XX_ETH_PHY_ADDR(1),
- .mac_addr = { 0x00, 0x50, 0x43, 0x11, 0x22, 0x33 },
-};
-
-static void __init lb88rc8480_init(void)
-{
- /*
- * Basic setup. Needs to be called early.
- */
- loki_init();
-
- loki_ge0_init(&lb88rc8480_ge0_data);
- loki_sas_init();
- loki_uart0_init();
- loki_uart1_init();
-
- loki_setup_dev_boot_win(LB88RC8480_FLASH_BOOT_CS_BASE,
- LB88RC8480_FLASH_BOOT_CS_SIZE);
- platform_device_register(&lb88rc8480_boot_flash);
-}
-
-MACHINE_START(LB88RC8480, "Marvell LB88RC8480 Development Board")
- /* Maintainer: Ke Wei <kewei@marvell.com> */
- .boot_params = 0x00000100,
- .init_machine = lb88rc8480_init,
- .map_io = loki_map_io,
- .init_early = loki_init_early,
- .init_irq = loki_init_irq,
- .timer = &loki_timer,
-MACHINE_END
diff --git a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S
index 870227c96602..b725f6c93975 100644
--- a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S
@@ -41,7 +41,3 @@
rsb \irqnr, \irqnr, #31
teq \irqstat, #0
.endm
-
- .macro irq_prio_table
- .endm
-
diff --git a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h b/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
index d1d936c7236d..720fa43a60bf 100644
--- a/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
+++ b/arch/arm/mach-lpc32xx/include/mach/vmalloc.h
@@ -19,6 +19,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END 0xF0000000
+#define VMALLOC_END 0xF0000000UL
#endif
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 2034098cf015..1a1af9e56250 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -18,6 +18,7 @@
#include <asm/hardware/gic.h>
#include <asm/cacheflush.h>
+#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <mach/msm_iomap.h>
@@ -40,6 +41,12 @@ volatile int pen_release = -1;
static DEFINE_SPINLOCK(boot_lock);
+static inline int get_core_count(void)
+{
+ /* 1 + the PART[1:0] field of MIDR */
+ return ((read_cpuid_id() >> 4) & 3) + 1;
+}
+
void __cpuinit platform_secondary_init(unsigned int cpu)
{
/* Configure edge-triggered PPIs */
@@ -147,9 +154,9 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
*/
void __init smp_init_cpus(void)
{
- unsigned int i;
+ unsigned int i, ncores = get_core_count();
- for (i = 0; i < NR_CPUS; i++)
+ for (i = 0; i < ncores; i++)
set_cpu_possible(i, true);
set_smp_cross_call(gic_raise_softirq);
@@ -157,12 +164,4 @@ void __init smp_init_cpus(void)
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
}
diff --git a/arch/arm/mach-mx5/Kconfig b/arch/arm/mach-mx5/Kconfig
index 799fbc40e53c..f25e9d7bf0f5 100644
--- a/arch/arm/mach-mx5/Kconfig
+++ b/arch/arm/mach-mx5/Kconfig
@@ -109,6 +109,7 @@ config MACH_EUKREA_MBIMX51_BASEBOARD
bool
select IMX_HAVE_PLATFORM_IMX_KEYPAD
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select LEDS_GPIO_REGISTER
help
This adds board specific devices that can be found on Eukrea's
MBIMX51 evaluation board.
@@ -135,6 +136,7 @@ config MACH_EUKREA_MBIMXSD51_BASEBOARD
prompt "Eukrea MBIMXSD development board"
bool
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select LEDS_GPIO_REGISTER
help
This adds board specific devices that can be found on Eukrea's
MBIMXSD evaluation board.
@@ -151,6 +153,7 @@ config MX51_EFIKA_COMMON
config MACH_MX51_EFIKAMX
bool "Support MX51 Genesi Efika MX nettop"
+ select LEDS_GPIO_REGISTER
select MX51_EFIKA_COMMON
help
Include support for Genesi Efika MX nettop. This includes specific
@@ -158,6 +161,7 @@ config MACH_MX51_EFIKAMX
config MACH_MX51_EFIKASB
bool "Support MX51 Genesi Efika Smartbook"
+ select LEDS_GPIO_REGISTER
select MX51_EFIKA_COMMON
help
Include support for Genesi Efika Smartbook. This includes specific
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c
index add0d42de7af..7c893fa70266 100644
--- a/arch/arm/mach-mx5/board-cpuimx51.c
+++ b/arch/arm/mach-mx5/board-cpuimx51.c
@@ -43,10 +43,6 @@
#define CPUIMX51_QUARTB_GPIO IMX_GPIO_NR(3, 25)
#define CPUIMX51_QUARTC_GPIO IMX_GPIO_NR(3, 26)
#define CPUIMX51_QUARTD_GPIO IMX_GPIO_NR(3, 27)
-#define CPUIMX51_QUARTA_IRQ (MXC_INTERNAL_IRQS + CPUIMX51_QUARTA_GPIO)
-#define CPUIMX51_QUARTB_IRQ (MXC_INTERNAL_IRQS + CPUIMX51_QUARTB_GPIO)
-#define CPUIMX51_QUARTC_IRQ (MXC_INTERNAL_IRQS + CPUIMX51_QUARTC_GPIO)
-#define CPUIMX51_QUARTD_IRQ (MXC_INTERNAL_IRQS + CPUIMX51_QUARTD_GPIO)
#define CPUIMX51_QUART_XTAL 14745600
#define CPUIMX51_QUART_REGSHIFT 17
@@ -61,7 +57,7 @@
static struct plat_serial8250_port serial_platform_data[] = {
{
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x400000),
- .irq = CPUIMX51_QUARTA_IRQ,
+ .irq = gpio_to_irq(CPUIMX51_QUARTA_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,
@@ -69,7 +65,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x800000),
- .irq = CPUIMX51_QUARTB_IRQ,
+ .irq = gpio_to_irq(CPUIMX51_QUARTB_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,
@@ -77,7 +73,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x1000000),
- .irq = CPUIMX51_QUARTC_IRQ,
+ .irq = gpio_to_irq(CPUIMX51_QUARTC_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,
@@ -85,7 +81,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x2000000),
- .irq = CPUIMX51_QUARTD_IRQ,
+		.irq = gpio_to_irq(CPUIMX51_QUARTD_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index 3112d15feebc..07a38154da21 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -26,7 +27,7 @@
#include "devices-imx51.h"
#include "devices.h"
-#define EXPIO_PARENT_INT (MXC_INTERNAL_IRQS + GPIO_PORTA + 6)
+#define EXPIO_PARENT_INT gpio_to_irq(IMX_GPIO_NR(1, 6))
#define MX51_3DS_ECSPI2_CS (GPIO_PORTC + 28)
static iomux_v3_cfg_t mx51_3ds_pads[] = {
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index 6021dd00ec75..e54e4bf61cfd 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -36,7 +36,7 @@
#define BABBAGE_USB_HUB_RESET IMX_GPIO_NR(1, 7)
#define BABBAGE_USBH1_STP IMX_GPIO_NR(1, 27)
-#define BABBAGE_PHY_RESET IMX_GPIO_NR(2, 5)
+#define BABBAGE_USB_PHY_RESET IMX_GPIO_NR(2, 5)
#define BABBAGE_FEC_PHY_RESET IMX_GPIO_NR(2, 14)
#define BABBAGE_POWER_KEY IMX_GPIO_NR(2, 21)
#define BABBAGE_ECSPI1_CS0 IMX_GPIO_NR(4, 24)
@@ -110,6 +110,9 @@ static iomux_v3_cfg_t mx51babbage_pads[] = {
/* USB HUB reset line*/
MX51_PAD_GPIO1_7__GPIO1_7,
+ /* USB PHY reset line */
+ MX51_PAD_EIM_D21__GPIO2_5,
+
/* FEC */
MX51_PAD_EIM_EB2__FEC_MDIO,
MX51_PAD_EIM_EB3__FEC_RDATA1,
@@ -169,34 +172,31 @@ static struct imxi2c_platform_data babbage_hsi2c_data = {
.bitrate = 400000,
};
+static struct gpio mx51_babbage_usbh1_gpios[] = {
+ { BABBAGE_USBH1_STP, GPIOF_OUT_INIT_LOW, "usbh1_stp" },
+ { BABBAGE_USB_PHY_RESET, GPIOF_OUT_INIT_LOW, "usbh1_phy_reset" },
+};
+
static int gpio_usbh1_active(void)
{
iomux_v3_cfg_t usbh1stp_gpio = MX51_PAD_USBH1_STP__GPIO1_27;
- iomux_v3_cfg_t phyreset_gpio = MX51_PAD_EIM_D21__GPIO2_5;
int ret;
/* Set USBH1_STP to GPIO and toggle it */
mxc_iomux_v3_setup_pad(usbh1stp_gpio);
- ret = gpio_request(BABBAGE_USBH1_STP, "usbh1_stp");
+ ret = gpio_request_array(mx51_babbage_usbh1_gpios,
+ ARRAY_SIZE(mx51_babbage_usbh1_gpios));
if (ret) {
- pr_debug("failed to get MX51_PAD_USBH1_STP__GPIO_1_27: %d\n", ret);
+ pr_debug("failed to get USBH1 pins: %d\n", ret);
return ret;
}
- gpio_direction_output(BABBAGE_USBH1_STP, 0);
- gpio_set_value(BABBAGE_USBH1_STP, 1);
- msleep(100);
- gpio_free(BABBAGE_USBH1_STP);
-
- /* De-assert USB PHY RESETB */
- mxc_iomux_v3_setup_pad(phyreset_gpio);
- ret = gpio_request(BABBAGE_PHY_RESET, "phy_reset");
- if (ret) {
- pr_debug("failed to get MX51_PAD_EIM_D21__GPIO_2_5: %d\n", ret);
- return ret;
- }
- gpio_direction_output(BABBAGE_PHY_RESET, 1);
+ msleep(100);
+ gpio_set_value(BABBAGE_USBH1_STP, 1);
+ gpio_set_value(BABBAGE_USB_PHY_RESET, 1);
+ gpio_free_array(mx51_babbage_usbh1_gpios,
+ ARRAY_SIZE(mx51_babbage_usbh1_gpios));
return 0;
}
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c
index 3be603b9075a..f70700dc0ec1 100644
--- a/arch/arm/mach-mx5/board-mx51_efikamx.c
+++ b/arch/arm/mach-mx5/board-mx51_efikamx.c
@@ -139,7 +139,7 @@ static void __init mx51_efikamx_board_id(void)
}
}
-static struct gpio_led mx51_efikamx_leds[] = {
+static struct gpio_led mx51_efikamx_leds[] __initdata = {
{
.name = "efikamx:green",
.default_trigger = "default-on",
@@ -157,19 +157,12 @@ static struct gpio_led mx51_efikamx_leds[] = {
},
};
-static struct gpio_led_platform_data mx51_efikamx_leds_data = {
+static const struct gpio_led_platform_data
+ mx51_efikamx_leds_data __initconst = {
.leds = mx51_efikamx_leds,
.num_leds = ARRAY_SIZE(mx51_efikamx_leds),
};
-static struct platform_device mx51_efikamx_leds_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &mx51_efikamx_leds_data,
- },
-};
-
static struct gpio_keys_button mx51_efikamx_powerkey[] = {
{
.code = KEY_POWER,
@@ -250,7 +243,7 @@ static void __init mx51_efikamx_init(void)
mx51_efikamx_leds[2].default_trigger = "mmc1";
}
- platform_device_register(&mx51_efikamx_leds_device);
+ gpio_led_register_device(-1, &mx51_efikamx_leds_data);
imx_add_gpio_keys(&mx51_efikamx_powerkey_data);
if (system_rev == 0x11) {
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c
index 4b2e522de0f8..2e4d9d32a87c 100644
--- a/arch/arm/mach-mx5/board-mx51_efikasb.c
+++ b/arch/arm/mach-mx5/board-mx51_efikasb.c
@@ -132,7 +132,7 @@ static void __init mx51_efikasb_usb(void)
mxc_register_device(&mxc_usbh2_device, &usbh2_config);
}
-static struct gpio_led mx51_efikasb_leds[] = {
+static const struct gpio_led mx51_efikasb_leds[] __initconst = {
{
.name = "efikasb:green",
.default_trigger = "default-on",
@@ -146,19 +146,12 @@ static struct gpio_led mx51_efikasb_leds[] = {
},
};
-static struct gpio_led_platform_data mx51_efikasb_leds_data = {
+static const struct gpio_led_platform_data
+ mx51_efikasb_leds_data __initconst = {
.leds = mx51_efikasb_leds,
.num_leds = ARRAY_SIZE(mx51_efikasb_leds),
};
-static struct platform_device mx51_efikasb_leds_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &mx51_efikasb_leds_data,
- },
-};
-
static struct gpio_keys_button mx51_efikasb_keys[] = {
{
.code = KEY_POWER,
@@ -258,9 +251,8 @@ static void __init efikasb_board_init(void)
mx51_efikasb_usb();
imx51_add_sdhci_esdhc_imx(1, NULL);
- platform_device_register(&mx51_efikasb_leds_device);
+ gpio_led_register_device(-1, &mx51_efikasb_leds_data);
imx_add_gpio_keys(&mx51_efikasb_keys_data);
-
}
static void __init mx51_efikasb_timer_init(void)
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index cd79e3435e28..0adeea17d123 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1274,9 +1274,9 @@ DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
/* I2C */
DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
+ NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
+ NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(hsi2c_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
NULL, NULL, &ipg_clk, NULL);
diff --git a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c b/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
index 97292d20f1f3..bbf4564bd050 100644
--- a/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
+++ b/arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c
@@ -31,13 +31,12 @@
#include "devices.h"
#define MBIMX51_TSC2007_GPIO IMX_GPIO_NR(3, 30)
-#define MBIMX51_TSC2007_IRQ (MXC_INTERNAL_IRQS + MBIMX51_TSC2007_GPIO)
#define MBIMX51_LED0 IMX_GPIO_NR(3, 5)
#define MBIMX51_LED1 IMX_GPIO_NR(3, 6)
#define MBIMX51_LED2 IMX_GPIO_NR(3, 7)
#define MBIMX51_LED3 IMX_GPIO_NR(3, 8)
-static struct gpio_led mbimx51_leds[] = {
+static const struct gpio_led mbimx51_leds[] __initconst = {
{
.name = "led0",
.default_trigger = "heartbeat",
@@ -64,23 +63,11 @@ static struct gpio_led mbimx51_leds[] = {
},
};
-static struct gpio_led_platform_data mbimx51_leds_info = {
+static const struct gpio_led_platform_data mbimx51_leds_info __initconst = {
.leds = mbimx51_leds,
.num_leds = ARRAY_SIZE(mbimx51_leds),
};
-static struct platform_device mbimx51_leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &mbimx51_leds_info,
- },
-};
-
-static struct platform_device *devices[] __initdata = {
- &mbimx51_leds_gpio,
-};
-
static iomux_v3_cfg_t mbimx51_pads[] = {
/* UART2 */
MX51_PAD_UART2_RXD__UART2_RXD,
@@ -173,7 +160,7 @@ struct tsc2007_platform_data tsc2007_data = {
static struct i2c_board_info mbimx51_i2c_devices[] = {
{
I2C_BOARD_INFO("tsc2007", 0x49),
- .irq = MBIMX51_TSC2007_IRQ,
+ .irq = gpio_to_irq(MBIMX51_TSC2007_GPIO),
.platform_data = &tsc2007_data,
}, {
I2C_BOARD_INFO("tlv320aic23", 0x1a),
@@ -204,13 +191,14 @@ void __init eukrea_mbimx51_baseboard_init(void)
gpio_direction_output(MBIMX51_LED3, 1);
gpio_free(MBIMX51_LED3);
- platform_add_devices(devices, ARRAY_SIZE(devices));
+ gpio_led_register_device(-1, &mbimx51_leds_info);
imx51_add_imx_keypad(&mbimx51_map_data);
gpio_request(MBIMX51_TSC2007_GPIO, "tsc2007_irq");
gpio_direction_input(MBIMX51_TSC2007_GPIO);
- irq_set_irq_type(MBIMX51_TSC2007_IRQ, IRQF_TRIGGER_FALLING);
+ irq_set_irq_type(gpio_to_irq(MBIMX51_TSC2007_GPIO),
+ IRQF_TRIGGER_FALLING);
i2c_register_board_info(1, mbimx51_i2c_devices,
ARRAY_SIZE(mbimx51_i2c_devices));
diff --git a/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c
index 31c871ec46a6..261923997643 100644
--- a/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c
@@ -74,7 +74,7 @@ static iomux_v3_cfg_t eukrea_mbimxsd_pads[] = {
#define GPIO_LED1 IMX_GPIO_NR(3, 30)
#define GPIO_SWITCH1 IMX_GPIO_NR(3, 31)
-static struct gpio_led eukrea_mbimxsd_leds[] = {
+static const struct gpio_led eukrea_mbimxsd_leds[] __initconst = {
{
.name = "led1",
.default_trigger = "heartbeat",
@@ -83,19 +83,12 @@ static struct gpio_led eukrea_mbimxsd_leds[] = {
},
};
-static struct gpio_led_platform_data eukrea_mbimxsd_led_info = {
+static const struct gpio_led_platform_data
+ eukrea_mbimxsd_led_info __initconst = {
.leds = eukrea_mbimxsd_leds,
.num_leds = ARRAY_SIZE(eukrea_mbimxsd_leds),
};
-static struct platform_device eukrea_mbimxsd_leds_gpio = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &eukrea_mbimxsd_led_info,
- },
-};
-
static struct gpio_keys_button eukrea_mbimxsd_gpio_buttons[] = {
{
.gpio = GPIO_SWITCH1,
@@ -112,10 +105,6 @@ static const struct gpio_keys_platform_data
.nbuttons = ARRAY_SIZE(eukrea_mbimxsd_gpio_buttons),
};
-static struct platform_device *platform_devices[] __initdata = {
- &eukrea_mbimxsd_leds_gpio,
-};
-
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -154,6 +143,6 @@ void __init eukrea_mbimxsd51_baseboard_init(void)
i2c_register_board_info(0, eukrea_mbimxsd_i2c_devices,
ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));
- platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+ gpio_led_register_device(-1, &eukrea_mbimxsd_led_info);
imx_add_gpio_keys(&eukrea_mbimxsd_button_data);
}
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index f114960622e0..162b0b0bc356 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -55,6 +55,7 @@ config MACH_MX28EVK
config MODULE_TX28
bool
select SOC_IMX28
+ select LEDS_GPIO_REGISTER
select MXS_HAVE_AMBA_DUART
select MXS_HAVE_PLATFORM_AUART
select MXS_HAVE_PLATFORM_FEC
diff --git a/arch/arm/mach-mxs/devices/platform-mxsfb.c b/arch/arm/mach-mxs/devices/platform-mxsfb.c
index bf72c9b8dbdd..5a75b7180f74 100644
--- a/arch/arm/mach-mxs/devices/platform-mxsfb.c
+++ b/arch/arm/mach-mxs/devices/platform-mxsfb.c
@@ -5,6 +5,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <asm/sizes.h>
#include <mach/mx23.h>
#include <mach/mx28.h>
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/arch/arm/mach-mxs/include/mach/dma.h
index 7f4aeeaba8df..203d7c4a3e11 100644
--- a/arch/arm/mach-mxs/include/mach/dma.h
+++ b/arch/arm/mach-mxs/include/mach/dma.h
@@ -9,6 +9,8 @@
#ifndef __MACH_MXS_DMA_H__
#define __MACH_MXS_DMA_H__
+#include <linux/dmaengine.h>
+
struct mxs_dma_data {
int chan_irq;
};
diff --git a/arch/arm/mach-mxs/mach-tx28.c b/arch/arm/mach-mxs/mach-tx28.c
index b65e3719cbc4..6766a12cca7f 100644
--- a/arch/arm/mach-mxs/mach-tx28.c
+++ b/arch/arm/mach-mxs/mach-tx28.c
@@ -101,14 +101,6 @@ static const iomux_cfg_t tx28_stk5v3_pads[] __initconst = {
(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
MX28_PAD_SSP0_DATA3__SSP0_D3 |
(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
- MX28_PAD_SSP0_DATA4__SSP0_D4 |
- (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
- MX28_PAD_SSP0_DATA5__SSP0_D5 |
- (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
- MX28_PAD_SSP0_DATA6__SSP0_D6 |
- (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
- MX28_PAD_SSP0_DATA7__SSP0_D7 |
- (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
MX28_PAD_SSP0_CMD__SSP0_CMD |
(MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT |
@@ -117,7 +109,7 @@ static const iomux_cfg_t tx28_stk5v3_pads[] __initconst = {
(MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
};
-static struct gpio_led tx28_stk5v3_leds[] = {
+static const struct gpio_led tx28_stk5v3_leds[] __initconst = {
{
.name = "GPIO-LED",
.default_trigger = "heartbeat",
@@ -159,8 +151,7 @@ static void __init tx28_stk5v3_init(void)
/* spi via ssp will be added when available */
spi_register_board_info(tx28_spi_board_info,
ARRAY_SIZE(tx28_spi_board_info));
- mxs_add_platform_device("leds-gpio", 0, NULL, 0,
- &tx28_stk5v3_led_data, sizeof(tx28_stk5v3_led_data));
+ gpio_led_register_device(0, &tx28_stk5v3_led_data);
mx28_add_mxs_i2c(0);
i2c_register_board_info(0, tx28_stk5v3_i2c_boardinfo,
ARRAY_SIZE(tx28_stk5v3_i2c_boardinfo));
diff --git a/arch/arm/mach-nuc93x/include/mach/vmalloc.h b/arch/arm/mach-nuc93x/include/mach/vmalloc.h
index 98a21b81dec0..7d11a5f07696 100644
--- a/arch/arm/mach-nuc93x/include/mach/vmalloc.h
+++ b/arch/arm/mach-nuc93x/include/mach/vmalloc.h
@@ -18,6 +18,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-#define VMALLOC_END (0xE0000000)
+#define VMALLOC_END 0xE0000000UL
#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index f49ce85d2448..312ea6b0409d 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -138,7 +138,7 @@ void ams_delta_latch2_write(u16 mask, u16 value)
static void __init ams_delta_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static struct map_desc ams_delta_io_desc[] __initdata = {
@@ -391,7 +391,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
.reserve = omap_reserve,
.init_irq = ams_delta_init_irq,
.init_machine = ams_delta_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
EXPORT_SYMBOL(ams_delta_latch1_write);
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 87f173d93557..a6b1bea50371 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -329,7 +329,7 @@ static void __init omap_fsample_init(void)
static void __init omap_fsample_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
/* Only FPGA needs to be mapped here. All others are done with ioremap */
@@ -394,5 +394,5 @@ MACHINE_START(OMAP_FSAMPLE, "OMAP730 F-Sample")
.reserve = omap_reserve,
.init_irq = omap_fsample_init_irq,
.init_machine = omap_fsample_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index 23f4ab9e2651..04fc356c40fa 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -31,7 +31,7 @@
static void __init omap_generic_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
/* assume no Mini-AB port */
@@ -99,5 +99,5 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP1510/1610/1710")
.reserve = omap_reserve,
.init_irq = omap_generic_init_irq,
.init_machine = omap_generic_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index ba3bd09c4754..cb7fb1aa3dca 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -376,7 +376,7 @@ static struct i2c_board_info __initdata h2_i2c_board_info[] = {
static void __init h2_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static struct omap_usb_config h2_usb_config __initdata = {
@@ -466,5 +466,5 @@ MACHINE_START(OMAP_H2, "TI-H2")
.reserve = omap_reserve,
.init_irq = h2_init_irq,
.init_machine = h2_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index ac48677672ee..31f34875ffad 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -439,7 +439,7 @@ static void __init h3_init(void)
static void __init h3_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static void __init h3_map_io(void)
@@ -454,5 +454,5 @@ MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
.reserve = omap_reserve,
.init_irq = h3_init_irq,
.init_machine = h3_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index ba05a51f9408..36e06ea7ec65 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -605,7 +605,7 @@ static void __init htcherald_init_irq(void)
{
printk(KERN_INFO "htcherald_init_irq.\n");
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
MACHINE_START(HERALD, "HTC Herald")
@@ -616,5 +616,5 @@ MACHINE_START(HERALD, "HTC Herald")
.reserve = omap_reserve,
.init_irq = htcherald_init_irq,
.init_machine = htcherald_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 2d9b8cbd7a14..0b1ba462d388 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -292,7 +292,7 @@ static void __init innovator_init_smc91x(void)
static void __init innovator_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
#ifdef CONFIG_ARCH_OMAP15XX
@@ -464,5 +464,5 @@ MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
.reserve = omap_reserve,
.init_irq = innovator_init_irq,
.init_machine = innovator_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index cfd084926146..5469ce247ffe 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -51,7 +51,7 @@ static void __init omap_nokia770_init_irq(void)
omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static const unsigned int nokia770_keymap[] = {
@@ -269,5 +269,5 @@ MACHINE_START(NOKIA770, "Nokia 770")
.reserve = omap_reserve,
.init_irq = omap_nokia770_init_irq,
.init_machine = omap_nokia770_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index e68dfde1918e..b08a21380772 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -282,7 +282,7 @@ static void __init osk_init_cf(void)
static void __init osk_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static struct omap_usb_config osk_usb_config __initdata = {
@@ -588,5 +588,5 @@ MACHINE_START(OMAP_OSK, "TI-OSK")
.reserve = omap_reserve,
.init_irq = osk_init_irq,
.init_machine = osk_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index c9d38f47845f..459cb6bfed55 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -62,7 +62,7 @@
static void __init omap_palmte_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static const unsigned int palmte_keymap[] = {
@@ -280,5 +280,5 @@ MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
.reserve = omap_reserve,
.init_irq = omap_palmte_init_irq,
.init_machine = omap_palmte_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index f04f2d36e7d3..b214f45f646c 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -266,7 +266,7 @@ static struct spi_board_info __initdata palmtt_boardinfo[] = {
static void __init omap_palmtt_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static struct omap_usb_config palmtt_usb_config __initdata = {
@@ -326,5 +326,5 @@ MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
.reserve = omap_reserve,
.init_irq = omap_palmtt_init_irq,
.init_machine = omap_palmtt_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 45f01d2c3a7a..9b0ea48d35fd 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -61,7 +61,7 @@ static void __init
omap_palmz71_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static const unsigned int palmz71_keymap[] = {
@@ -346,5 +346,5 @@ MACHINE_START(OMAP_PALMZ71, "OMAP310 based Palm Zire71")
.reserve = omap_reserve,
.init_irq = omap_palmz71_init_irq,
.init_machine = omap_palmz71_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index 3c8ee8489458..67acd4142639 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -297,7 +297,7 @@ static void __init omap_perseus2_init(void)
static void __init omap_perseus2_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
/* Only FPGA needs to be mapped here. All others are done with ioremap */
static struct map_desc omap_perseus2_io_desc[] __initdata = {
@@ -355,5 +355,5 @@ MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
.reserve = omap_reserve,
.init_irq = omap_perseus2_init_irq,
.init_machine = omap_perseus2_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index 0ad781db4e66..9c3b7c52d9cf 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -411,7 +411,7 @@ static void __init omap_sx1_init(void)
static void __init omap_sx1_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
/*----------------------------------------*/
@@ -426,5 +426,5 @@ MACHINE_START(SX1, "OMAP310 based Siemens SX1")
.reserve = omap_reserve,
.init_irq = omap_sx1_init_irq,
.init_machine = omap_sx1_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 65d24204937a..036edc0ee9b6 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -162,7 +162,7 @@ static struct omap_board_config_kernel voiceblue_config[] = {
static void __init voiceblue_init_irq(void)
{
omap1_init_common_hw();
- omap_init_irq();
+ omap1_init_irq();
}
static void __init voiceblue_map_io(void)
@@ -306,5 +306,5 @@ MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
.reserve = omap_reserve,
.init_irq = voiceblue_init_irq,
.init_machine = voiceblue_init,
- .timer = &omap_timer,
+ .timer = &omap1_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 5d3da7a63af3..e2b9c901ab67 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -175,7 +175,7 @@ static struct irq_chip omap_irq_chip = {
.irq_set_wake = omap_wake_irq,
};
-void __init omap_init_irq(void)
+void __init omap1_init_irq(void)
{
int i, j;
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index d9af9811dedd..ab7395d84bc8 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -38,7 +38,7 @@ static void omap1_mcbsp_request(unsigned int id)
* On 1510, 1610 and 1710, McBSP1 and McBSP3
* are DSP public peripherals.
*/
- if (id == OMAP_MCBSP1 || id == OMAP_MCBSP3) {
+ if (id == 0 || id == 2) {
if (dsp_use++ == 0) {
api_clk = clk_get(NULL, "api_ck");
dsp_clk = clk_get(NULL, "dsp_ck");
@@ -59,7 +59,7 @@ static void omap1_mcbsp_request(unsigned int id)
static void omap1_mcbsp_free(unsigned int id)
{
- if (id == OMAP_MCBSP1 || id == OMAP_MCBSP3) {
+ if (id == 0 || id == 2) {
if (--dsp_use == 0) {
if (!IS_ERR(api_clk)) {
clk_disable(api_clk);
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 03e1e1062ad4..a1837771e031 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -297,7 +297,7 @@ static inline int omap_32k_timer_usable(void)
* Timer initialization
* ---------------------------------------------------------------------------
*/
-static void __init omap_timer_init(void)
+static void __init omap1_timer_init(void)
{
if (omap_32k_timer_usable()) {
preferred_sched_clock_init(1);
@@ -307,6 +307,6 @@ static void __init omap_timer_init(void)
}
}
-struct sys_timer omap_timer = {
- .init = omap_timer_init,
+struct sys_timer omap1_timer = {
+ .init = omap1_timer_init,
};
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 13d7b8f145bd..96604a50c4fe 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -183,10 +183,6 @@ static __init void omap_init_32k_timer(void)
bool __init omap_32k_timer_init(void)
{
omap_init_clocksource_32k();
-
-#ifdef CONFIG_OMAP_DM_TIMER
- omap_dm_timer_init();
-#endif
omap_init_32k_timer();
return true;
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index b14807794401..f34336560437 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o pm.o \
+obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
common.o gpio.o dma.o wd_timer.o
omap-2-3-common = irq.o sdrc.o
@@ -145,9 +145,19 @@ obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o
obj-$(CONFIG_SOC_OMAP2430) += opp2430_data.o
# hwmod data
-obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2420_data.o
-obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2430_data.o
-obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_ipblock_data.o \
+ omap_hwmod_2xxx_3xxx_ipblock_data.o \
+ omap_hwmod_2xxx_interconnect_data.o \
+ omap_hwmod_2xxx_3xxx_interconnect_data.o \
+ omap_hwmod_2420_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_ipblock_data.o \
+ omap_hwmod_2xxx_3xxx_ipblock_data.o \
+ omap_hwmod_2xxx_interconnect_data.o \
+ omap_hwmod_2xxx_3xxx_interconnect_data.o \
+ omap_hwmod_2430_data.o
+obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o \
+ omap_hwmod_2xxx_3xxx_interconnect_data.o \
+ omap_hwmod_3xxx_data.o
obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
# EMU peripherals
@@ -269,4 +279,4 @@ obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
disp-$(CONFIG_OMAP2_DSS) := display.o
obj-y += $(disp-m) $(disp-y)
-obj-y += common-board-devices.o
+obj-y += common-board-devices.o twl-common.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 5de6eac0a725..2028464cf5b9 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -260,7 +260,7 @@ MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
.reserve = omap_reserve,
.map_io = omap_2430sdp_map_io,
.init_early = omap_2430sdp_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap2_init_irq,
.init_machine = omap_2430sdp_init,
- .timer = &omap_timer,
+ .timer = &omap2_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 5dac974be625..bd600cfb7f80 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -231,22 +231,6 @@ static void __init omap_3430sdp_init_early(void)
omap2_init_common_devices(hyb18m512160af6_sdrc_params, NULL);
}
-static int sdp3430_batt_table[] = {
-/* 0 C*/
-30800, 29500, 28300, 27100,
-26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
-17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
-11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310,
-8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830,
-5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170,
-4040, 3910, 3790, 3670, 3550
-};
-
-static struct twl4030_bci_platform_data sdp3430_bci_data = {
- .battery_tmp_tbl = sdp3430_batt_table,
- .tblsize = ARRAY_SIZE(sdp3430_batt_table),
-};
-
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
@@ -292,14 +276,6 @@ static struct twl4030_gpio_platform_data sdp3430_gpio_data = {
.setup = sdp3430_twl_gpio_setup,
};
-static struct twl4030_usb_data sdp3430_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_madc_platform_data sdp3430_madc_data = {
- .irq_line = 1,
-};
-
/* regulator consumer mappings */
/* ads7846 on SPI */
@@ -307,16 +283,6 @@ static struct regulator_consumer_supply sdp3430_vaux3_supplies[] = {
REGULATOR_SUPPLY("vcc", "spi1.0"),
};
-static struct regulator_consumer_supply sdp3430_vdda_dac_supplies[] = {
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"),
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
static struct regulator_consumer_supply sdp3430_vmmc1_supplies[] = {
REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
@@ -433,54 +399,10 @@ static struct regulator_init_data sdp3430_vsim = {
.consumer_supplies = sdp3430_vsim_supplies,
};
-/* VDAC for DSS driving S-Video */
-static struct regulator_init_data sdp3430_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(sdp3430_vdda_dac_supplies),
- .consumer_supplies = sdp3430_vdda_dac_supplies,
-};
-
-static struct regulator_init_data sdp3430_vpll2 = {
- .constraints = {
- .name = "VDVI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(sdp3430_vpll2_supplies),
- .consumer_supplies = sdp3430_vpll2_supplies,
-};
-
-static struct twl4030_codec_audio_data sdp3430_audio;
-
-static struct twl4030_codec_data sdp3430_codec = {
- .audio_mclk = 26000000,
- .audio = &sdp3430_audio,
-};
-
static struct twl4030_platform_data sdp3430_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
- .bci = &sdp3430_bci_data,
.gpio = &sdp3430_gpio_data,
- .madc = &sdp3430_madc_data,
.keypad = &sdp3430_kp_data,
- .usb = &sdp3430_usb_data,
- .codec = &sdp3430_codec,
.vaux1 = &sdp3430_vaux1,
.vaux2 = &sdp3430_vaux2,
@@ -489,14 +411,21 @@ static struct twl4030_platform_data sdp3430_twldata = {
.vmmc1 = &sdp3430_vmmc1,
.vmmc2 = &sdp3430_vmmc2,
.vsim = &sdp3430_vsim,
- .vdac = &sdp3430_vdac,
- .vpll2 = &sdp3430_vpll2,
};
static int __init omap3430_i2c_init(void)
{
/* i2c1 for PMIC only */
+ omap3_pmic_get_config(&sdp3430_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_BCI |
+ TWL_COMMON_PDATA_MADC | TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+ sdp3430_twldata.vdac->constraints.apply_uV = true;
+ sdp3430_twldata.vpll2->constraints.apply_uV = true;
+ sdp3430_twldata.vpll2->constraints.name = "VDVI";
+
omap3_pmic_init("twl4030", &sdp3430_twldata);
+
/* i2c2 on camera connector (for sensor control) and optional isp1301 */
omap_register_i2c_bus(2, 400, NULL, 0);
/* i2c3 on display connector (for DVI, tfp410) */
@@ -804,7 +733,7 @@ MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap_3430sdp_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap_3430sdp_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index a5933cc15caa..e4f37b57a0c4 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -219,7 +219,7 @@ MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap_sdp_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap_sdp_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 63de2d396e2d..933b25bb10de 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -40,7 +40,6 @@
#include "mux.h"
#include "hsmmc.h"
-#include "timer-gp.h"
#include "control.h"
#include "common-board-devices.h"
@@ -295,9 +294,6 @@ static void __init omap_4430sdp_init_early(void)
{
omap2_init_common_infrastructure();
omap2_init_common_devices(NULL, NULL);
-#ifdef CONFIG_OMAP_32K_TIMER
- omap2_gp_clockevent_set_gptimer(1);
-#endif
}
static struct omap_musb_board_data musb_board_data = {
@@ -306,14 +302,6 @@ static struct omap_musb_board_data musb_board_data = {
.power = 100,
};
-static struct twl4030_usb_data omap4_usbphy_data = {
- .phy_init = omap4430_phy_init,
- .phy_exit = omap4430_phy_exit,
- .phy_power = omap4430_phy_power,
- .phy_set_clock = omap4430_phy_set_clk,
- .phy_suspend = omap4430_phy_suspend,
-};
-
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 2,
@@ -333,16 +321,7 @@ static struct omap2_hsmmc_info mmc[] = {
};
static struct regulator_consumer_supply sdp4430_vaux_supply[] = {
- {
- .supply = "vmmc",
- .dev_name = "omap_hsmmc.1",
- },
-};
-static struct regulator_consumer_supply sdp4430_vmmc_supply[] = {
- {
- .supply = "vmmc",
- .dev_name = "omap_hsmmc.0",
- },
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
};
static int omap4_twl6030_hsmmc_late_init(struct device *dev)
@@ -399,65 +378,10 @@ static struct regulator_init_data sdp4430_vaux1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
+ .num_consumer_supplies = ARRAY_SIZE(sdp4430_vaux_supply),
.consumer_supplies = sdp4430_vaux_supply,
};
-static struct regulator_init_data sdp4430_vaux2 = {
- .constraints = {
- .min_uV = 1200000,
- .max_uV = 2800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data sdp4430_vaux3 = {
- .constraints = {
- .min_uV = 1000000,
- .max_uV = 3000000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-/* VMMC1 for MMC1 card */
-static struct regulator_init_data sdp4430_vmmc = {
- .constraints = {
- .min_uV = 1200000,
- .max_uV = 3000000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = sdp4430_vmmc_supply,
-};
-
-static struct regulator_init_data sdp4430_vpp = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 2500000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
static struct regulator_init_data sdp4430_vusim = {
.constraints = {
.min_uV = 1200000,
@@ -471,74 +395,10 @@ static struct regulator_init_data sdp4430_vusim = {
},
};
-static struct regulator_init_data sdp4430_vana = {
- .constraints = {
- .min_uV = 2100000,
- .max_uV = 2100000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data sdp4430_vcxio = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data sdp4430_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data sdp4430_vusb = {
- .constraints = {
- .min_uV = 3300000,
- .max_uV = 3300000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data sdp4430_clk32kg = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
-};
-
static struct twl4030_platform_data sdp4430_twldata = {
- .irq_base = TWL6030_IRQ_BASE,
- .irq_end = TWL6030_IRQ_END,
-
/* Regulators */
- .vmmc = &sdp4430_vmmc,
- .vpp = &sdp4430_vpp,
.vusim = &sdp4430_vusim,
- .vana = &sdp4430_vana,
- .vcxio = &sdp4430_vcxio,
- .vdac = &sdp4430_vdac,
- .vusb = &sdp4430_vusb,
.vaux1 = &sdp4430_vaux1,
- .vaux2 = &sdp4430_vaux2,
- .vaux3 = &sdp4430_vaux3,
- .clk32kg = &sdp4430_clk32kg,
- .usb = &omap4_usbphy_data
};
static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = {
@@ -556,6 +416,16 @@ static struct i2c_board_info __initdata sdp4430_i2c_4_boardinfo[] = {
};
static int __init omap4_i2c_init(void)
{
+ omap4_pmic_get_config(&sdp4430_twldata, TWL_COMMON_PDATA_USB,
+ TWL_COMMON_REGULATOR_VDAC |
+ TWL_COMMON_REGULATOR_VAUX2 |
+ TWL_COMMON_REGULATOR_VAUX3 |
+ TWL_COMMON_REGULATOR_VMMC |
+ TWL_COMMON_REGULATOR_VPP |
+ TWL_COMMON_REGULATOR_VANA |
+ TWL_COMMON_REGULATOR_VCXIO |
+ TWL_COMMON_REGULATOR_VUSB |
+ TWL_COMMON_REGULATOR_CLK32KG);
omap4_pmic_init("twl6030", &sdp4430_twldata);
omap_register_i2c_bus(2, 400, NULL, 0);
omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo,
@@ -773,5 +643,5 @@ MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
.init_early = omap_4430sdp_init_early,
.init_irq = gic_init_irq,
.init_machine = omap_4430sdp_init,
- .timer = &omap_timer,
+ .timer = &omap4_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 5e438a77cd72..5f2b55ff04ff 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -104,7 +104,7 @@ MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = am3517_crane_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = am3517_crane_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 63af4171c043..f3006c304150 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -494,7 +494,7 @@ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = am3517_evm_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = am3517_evm_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index b124bdfb4239..70211703ff9f 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -354,7 +354,7 @@ MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
.reserve = omap_reserve,
.map_io = omap_apollon_map_io,
.init_early = omap_apollon_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap2_init_irq,
.init_machine = omap_apollon_init,
- .timer = &omap_timer,
+ .timer = &omap2_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 77456dec93ea..35891d49c631 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -162,9 +162,7 @@ static struct mtd_partition cm_t35_nand_partitions[] = {
static struct omap_nand_platform_data cm_t35_nand_data = {
.parts = cm_t35_nand_partitions,
.nr_parts = ARRAY_SIZE(cm_t35_nand_partitions),
- .dma_channel = -1, /* disable DMA in OMAP NAND driver */
.cs = 0,
-
};
static void __init cm_t35_init_nand(void)
@@ -337,19 +335,17 @@ static void __init cm_t35_init_display(void)
}
}
-static struct regulator_consumer_supply cm_t35_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply cm_t35_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply cm_t35_vsim_supply = {
- .supply = "vmmc_aux",
+static struct regulator_consumer_supply cm_t35_vsim_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply cm_t35_vdac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-static struct regulator_consumer_supply cm_t35_vdvi_supply =
- REGULATOR_SUPPLY("vdvi", "omapdss");
+static struct regulator_consumer_supply cm_t35_vdvi_supply[] = {
+ REGULATOR_SUPPLY("vdvi", "omapdss"),
+};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data cm_t35_vmmc1 = {
@@ -362,8 +358,8 @@ static struct regulator_init_data cm_t35_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &cm_t35_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(cm_t35_vmmc1_supply),
+ .consumer_supplies = cm_t35_vmmc1_supply,
};
/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -377,41 +373,8 @@ static struct regulator_init_data cm_t35_vsim = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &cm_t35_vsim_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data cm_t35_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &cm_t35_vdac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data cm_t35_vpll2 = {
- .constraints = {
- .name = "VDVI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &cm_t35_vdvi_supply,
-};
-
-static struct twl4030_usb_data cm_t35_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
+ .num_consumer_supplies = ARRAY_SIZE(cm_t35_vsim_supply),
+ .consumer_supplies = cm_t35_vsim_supply,
};
static uint32_t cm_t35_keymap[] = {
@@ -481,10 +444,6 @@ static int cm_t35_twl_gpio_setup(struct device *dev, unsigned gpio,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters */
- cm_t35_vmmc1_supply.dev = mmc[0].dev;
- cm_t35_vsim_supply.dev = mmc[0].dev;
-
return 0;
}
@@ -496,21 +455,23 @@ static struct twl4030_gpio_platform_data cm_t35_gpio_data = {
};
static struct twl4030_platform_data cm_t35_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
.keypad = &cm_t35_kp_data,
- .usb = &cm_t35_usb_data,
.gpio = &cm_t35_gpio_data,
.vmmc1 = &cm_t35_vmmc1,
.vsim = &cm_t35_vsim,
- .vdac = &cm_t35_vdac,
- .vpll2 = &cm_t35_vpll2,
};
static void __init cm_t35_init_i2c(void)
{
+ omap3_pmic_get_config(&cm_t35_twldata, TWL_COMMON_PDATA_USB,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+ cm_t35_twldata.vpll2->constraints.name = "VDVI";
+ cm_t35_twldata.vpll2->num_consumer_supplies =
+ ARRAY_SIZE(cm_t35_vdvi_supply);
+ cm_t35_twldata.vpll2->consumer_supplies = cm_t35_vdvi_supply;
+
omap3_pmic_init("tps65930", &cm_t35_twldata);
}
@@ -646,7 +607,7 @@ MACHINE_START(CM_T35, "Compulab CM-T35")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = cm_t35_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = cm_t35_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index c3a9fd35034a..05c72f4c1b57 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -236,7 +236,6 @@ static struct mtd_partition cm_t3517_nand_partitions[] = {
static struct omap_nand_platform_data cm_t3517_nand_data = {
.parts = cm_t3517_nand_partitions,
.nr_parts = ARRAY_SIZE(cm_t3517_nand_partitions),
- .dma_channel = -1, /* disable DMA in OMAP NAND driver */
.cs = 0,
};
@@ -304,7 +303,7 @@ MACHINE_START(CM_T3517, "Compulab CM-T3517")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = cm_t3517_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = cm_t3517_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 34956ec83296..b6002ec31c6a 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -58,7 +58,6 @@
#include "mux.h"
#include "hsmmc.h"
-#include "timer-gp.h"
#include "common-board-devices.h"
#define OMAP_DM9000_GPIO_IRQ 25
@@ -130,13 +129,14 @@ static void devkit8000_panel_disable_dvi(struct omap_dss_device *dssdev)
gpio_set_value_cansleep(dssdev->reset_gpio, 0);
}
-static struct regulator_consumer_supply devkit8000_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
-
+static struct regulator_consumer_supply devkit8000_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
/* ads7846 on SPI */
-static struct regulator_consumer_supply devkit8000_vio_supply =
- REGULATOR_SUPPLY("vcc", "spi2.0");
+static struct regulator_consumer_supply devkit8000_vio_supply[] = {
+ REGULATOR_SUPPLY("vcc", "spi2.0"),
+};
static struct panel_generic_dpi_data lcd_panel = {
.name = "generic",
@@ -186,9 +186,6 @@ static struct omap_dss_board_info devkit8000_dss_data = {
.default_device = &devkit8000_lcd_device,
};
-static struct regulator_consumer_supply devkit8000_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
static uint32_t board_keymap[] = {
KEY(0, 0, KEY_1),
KEY(1, 0, KEY_2),
@@ -284,22 +281,8 @@ static struct regulator_init_data devkit8000_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &devkit8000_vmmc1_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data devkit8000_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &devkit8000_vdda_dac_supply,
+ .num_consumer_supplies = ARRAY_SIZE(devkit8000_vmmc1_supply),
+ .consumer_supplies = devkit8000_vmmc1_supply,
};
/* VPLL1 for digital video outputs */
@@ -327,31 +310,14 @@ static struct regulator_init_data devkit8000_vio = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &devkit8000_vio_supply,
-};
-
-static struct twl4030_usb_data devkit8000_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_codec_audio_data devkit8000_audio_data;
-
-static struct twl4030_codec_data devkit8000_codec_data = {
- .audio_mclk = 26000000,
- .audio = &devkit8000_audio_data,
+ .num_consumer_supplies = ARRAY_SIZE(devkit8000_vio_supply),
+ .consumer_supplies = devkit8000_vio_supply,
};
static struct twl4030_platform_data devkit8000_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
- .usb = &devkit8000_usb_data,
.gpio = &devkit8000_gpio_data,
- .codec = &devkit8000_codec_data,
.vmmc1 = &devkit8000_vmmc1,
- .vdac = &devkit8000_vdac,
.vpll1 = &devkit8000_vpll1,
.vio = &devkit8000_vio,
.keypad = &devkit8000_kp_data,
@@ -359,6 +325,9 @@ static struct twl4030_platform_data devkit8000_twldata = {
static int __init devkit8000_i2c_init(void)
{
+ omap3_pmic_get_config(&devkit8000_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC);
omap3_pmic_init("tps65930", &devkit8000_twldata);
/* Bus 3 is attached to the DVI port where devices like the pico DLP
* projector don't work reliably with 400kHz */
@@ -438,10 +407,7 @@ static void __init devkit8000_init_early(void)
static void __init devkit8000_init_irq(void)
{
- omap_init_irq();
-#ifdef CONFIG_OMAP_32K_TIMER
- omap2_gp_clockevent_set_gptimer(12);
-#endif
+ omap3_init_irq();
}
#define OMAP_DM9000_BASE 0x2c000000
@@ -707,5 +673,5 @@ MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
.init_early = devkit8000_init_early,
.init_irq = devkit8000_init_irq,
.init_machine = devkit8000_init,
- .timer = &omap_timer,
+ .timer = &omap3_secure_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index 729892fdcf2e..aa1b0cbe19d2 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -132,11 +132,7 @@ static struct gpmc_timings nand_timings = {
};
static struct omap_nand_platform_data board_nand_data = {
- .nand_setup = NULL,
.gpmc_t = &nand_timings,
- .dma_channel = -1, /* disable DMA in OMAP NAND driver */
- .dev_ready = NULL,
- .devsize = 0, /* '0' for 8-bit, '1' for 16-bit device */
};
void
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 73e3c31e8508..54db41a84a9b 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -70,7 +70,7 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP24xx")
.reserve = omap_reserve,
.map_io = omap_generic_map_io,
.init_early = omap_generic_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap2_init_irq,
.init_machine = omap_generic_init,
- .timer = &omap_timer,
+ .timer = &omap2_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index bac7933b8cbb..45de2b319ec9 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -298,7 +298,7 @@ static void __init omap_h4_init_early(void)
static void __init omap_h4_init_irq(void)
{
- omap_init_irq();
+ omap2_init_irq();
}
static struct at24_platform_data m24c01 = {
@@ -388,5 +388,5 @@ MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
.init_early = omap_h4_init_early,
.init_irq = omap_h4_init_irq,
.init_machine = omap_h4_init,
- .timer = &omap_timer,
+ .timer = &omap2_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 0c1bfca3f731..35be778caf1b 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -222,8 +222,9 @@ static inline void __init igep2_init_smsc911x(void)
static inline void __init igep2_init_smsc911x(void) { }
#endif
-static struct regulator_consumer_supply igep_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
+static struct regulator_consumer_supply igep_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
static struct regulator_init_data igep_vmmc1 = {
@@ -236,12 +237,13 @@ static struct regulator_init_data igep_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &igep_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(igep_vmmc1_supply),
+ .consumer_supplies = igep_vmmc1_supply,
};
-static struct regulator_consumer_supply igep_vio_supply =
- REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
+static struct regulator_consumer_supply igep_vio_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"),
+};
static struct regulator_init_data igep_vio = {
.constraints = {
@@ -254,20 +256,21 @@ static struct regulator_init_data igep_vio = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &igep_vio_supply,
+ .num_consumer_supplies = ARRAY_SIZE(igep_vio_supply),
+ .consumer_supplies = igep_vio_supply,
};
-static struct regulator_consumer_supply igep_vmmc2_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+static struct regulator_consumer_supply igep_vmmc2_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
+};
static struct regulator_init_data igep_vmmc2 = {
.constraints = {
.valid_modes_mask = REGULATOR_MODE_NORMAL,
.always_on = 1,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &igep_vmmc2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(igep_vmmc2_supply),
+ .consumer_supplies = igep_vmmc2_supply,
};
static struct fixed_voltage_config igep_vwlan = {
@@ -440,10 +443,6 @@ static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = {
.setup = igep_twl_gpio_setup,
};
-static struct twl4030_usb_data igep_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
static int igep2_enable_dvi(struct omap_dss_device *dssdev)
{
gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1);
@@ -480,26 +479,6 @@ static struct omap_dss_board_info igep2_dss_data = {
.default_device = &igep2_dvi_device,
};
-static struct regulator_consumer_supply igep2_vpll2_supplies[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
-static struct regulator_init_data igep2_vpll2 = {
- .constraints = {
- .name = "VDVI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(igep2_vpll2_supplies),
- .consumer_supplies = igep2_vpll2_supplies,
-};
-
static void __init igep2_display_init(void)
{
int err = gpio_request_one(IGEP2_GPIO_DVI_PUP, GPIOF_OUT_INIT_HIGH,
@@ -519,13 +498,6 @@ static void __init igep_init_early(void)
m65kxxxxam_sdrc_params);
}
-static struct twl4030_codec_audio_data igep2_audio_data;
-
-static struct twl4030_codec_data igep2_codec_data = {
- .audio_mclk = 26000000,
- .audio = &igep2_audio_data,
-};
-
static int igep2_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_RIGHT),
@@ -558,11 +530,7 @@ static struct twl4030_keypad_data igep2_keypad_pdata = {
};
static struct twl4030_platform_data igep_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
- .usb = &igep_usb_data,
.gpio = &igep_twl4030_gpio_pdata,
.vmmc1 = &igep_vmmc1,
.vio = &igep_vio,
@@ -578,6 +546,8 @@ static void __init igep_i2c_init(void)
{
int ret;
+ omap3_pmic_get_config(&igep_twldata, TWL_COMMON_PDATA_USB, 0);
+
if (machine_is_igep0020()) {
/*
* Bus 3 is attached to the DVI port where devices like the
@@ -588,9 +558,12 @@ static void __init igep_i2c_init(void)
if (ret)
pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret);
- igep_twldata.codec = &igep2_codec_data;
igep_twldata.keypad = &igep2_keypad_pdata;
- igep_twldata.vpll2 = &igep2_vpll2;
+ /* Get common pmic data */
+ omap3_pmic_get_config(&igep_twldata, TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VPLL2);
+ igep_twldata.vpll2->constraints.apply_uV = true;
+ igep_twldata.vpll2->constraints.name = "VDVI";
}
omap3_pmic_init("twl4030", &igep_twldata);
@@ -703,9 +676,9 @@ MACHINE_START(IGEP0020, "IGEP v2 board")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = igep_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = igep_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
MACHINE_START(IGEP0030, "IGEP OMAP3 module")
@@ -713,7 +686,7 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = igep_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = igep_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index f7d6038075f0..218764c9377e 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -199,22 +199,14 @@ static void __init omap_ldp_init_early(void)
omap2_init_common_devices(NULL, NULL);
}
-static struct twl4030_usb_data ldp_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
static struct twl4030_gpio_platform_data ldp_gpio_data = {
.gpio_base = OMAP_MAX_GPIO_LINES,
.irq_base = TWL4030_GPIO_IRQ_BASE,
.irq_end = TWL4030_GPIO_IRQ_END,
};
-static struct twl4030_madc_platform_data ldp_madc_data = {
- .irq_line = 1,
-};
-
-static struct regulator_consumer_supply ldp_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply ldp_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -228,8 +220,8 @@ static struct regulator_init_data ldp_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &ldp_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(ldp_vmmc1_supply),
+ .consumer_supplies = ldp_vmmc1_supply,
};
/* ads7846 on SPI */
@@ -253,12 +245,7 @@ static struct regulator_init_data ldp_vaux1 = {
};
static struct twl4030_platform_data ldp_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
- .madc = &ldp_madc_data,
- .usb = &ldp_usb_data,
.vmmc1 = &ldp_vmmc1,
.vaux1 = &ldp_vaux1,
.gpio = &ldp_gpio_data,
@@ -267,6 +254,8 @@ static struct twl4030_platform_data ldp_twldata = {
static int __init omap_i2c_init(void)
{
+ omap3_pmic_get_config(&ldp_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC, 0);
omap3_pmic_init("twl4030", &ldp_twldata);
omap_register_i2c_bus(2, 400, NULL, 0);
omap_register_i2c_bus(3, 400, NULL, 0);
@@ -341,8 +330,6 @@ static void __init omap_ldp_init(void)
ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0);
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters */
- ldp_vmmc1_supply.dev = mmc[0].dev;
}
MACHINE_START(OMAP_LDP, "OMAP LDP board")
@@ -350,7 +337,7 @@ MACHINE_START(OMAP_LDP, "OMAP LDP board")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap_ldp_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap_ldp_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 8d74318ed495..e11f0c5d608a 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -699,9 +699,9 @@ MACHINE_START(NOKIA_N800, "Nokia N800")
.reserve = omap_reserve,
.map_io = n8x0_map_io,
.init_early = n8x0_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap2_init_irq,
.init_machine = n8x0_init_machine,
- .timer = &omap_timer,
+ .timer = &omap2_timer,
MACHINE_END
MACHINE_START(NOKIA_N810, "Nokia N810")
@@ -709,9 +709,9 @@ MACHINE_START(NOKIA_N810, "Nokia N810")
.reserve = omap_reserve,
.map_io = n8x0_map_io,
.init_early = n8x0_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap2_init_irq,
.init_machine = n8x0_init_machine,
- .timer = &omap_timer,
+ .timer = &omap2_timer,
MACHINE_END
MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
@@ -719,7 +719,7 @@ MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
.reserve = omap_reserve,
.map_io = n8x0_map_io,
.init_early = n8x0_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap2_init_irq,
.init_machine = n8x0_init_machine,
- .timer = &omap_timer,
+ .timer = &omap2_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 7f21d24bd437..34f841112768 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -50,7 +50,6 @@
#include "mux.h"
#include "hsmmc.h"
-#include "timer-gp.h"
#include "pm.h"
#include "common-board-devices.h"
@@ -210,14 +209,6 @@ static struct omap_dss_board_info beagle_dss_data = {
.default_device = &beagle_dvi_device,
};
-static struct regulator_consumer_supply beagle_vdac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-static struct regulator_consumer_supply beagle_vdvi_supplies[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
static void __init beagle_display_init(void)
{
int r;
@@ -239,12 +230,12 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
-static struct regulator_consumer_supply beagle_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply beagle_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply beagle_vsim_supply = {
- .supply = "vmmc_aux",
+static struct regulator_consumer_supply beagle_vsim_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};
static struct gpio_led gpio_leds[];
@@ -267,10 +258,6 @@ static int beagle_twl_gpio_setup(struct device *dev,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters */
- beagle_vmmc1_supply.dev = mmc[0].dev;
- beagle_vsim_supply.dev = mmc[0].dev;
-
/*
* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
* high / others active low)
@@ -336,8 +323,8 @@ static struct regulator_init_data beagle_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &beagle_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(beagle_vmmc1_supply),
+ .consumer_supplies = beagle_vmmc1_supply,
};
/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -351,62 +338,15 @@ static struct regulator_init_data beagle_vsim = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &beagle_vsim_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data beagle_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &beagle_vdac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data beagle_vpll2 = {
- .constraints = {
- .name = "VDVI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(beagle_vdvi_supplies),
- .consumer_supplies = beagle_vdvi_supplies,
-};
-
-static struct twl4030_usb_data beagle_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_codec_audio_data beagle_audio_data;
-
-static struct twl4030_codec_data beagle_codec_data = {
- .audio_mclk = 26000000,
- .audio = &beagle_audio_data,
+ .num_consumer_supplies = ARRAY_SIZE(beagle_vsim_supply),
+ .consumer_supplies = beagle_vsim_supply,
};
static struct twl4030_platform_data beagle_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
- .usb = &beagle_usb_data,
.gpio = &beagle_gpio_data,
- .codec = &beagle_codec_data,
.vmmc1 = &beagle_vmmc1,
.vsim = &beagle_vsim,
- .vdac = &beagle_vdac,
- .vpll2 = &beagle_vpll2,
};
static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
@@ -417,6 +357,12 @@ static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
static int __init omap3_beagle_i2c_init(void)
{
+ omap3_pmic_get_config(&beagle_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+ beagle_twldata.vpll2->constraints.name = "VDVI";
+
omap3_pmic_init("twl4030", &beagle_twldata);
/* Bus 3 is attached to the DVI port where devices like the pico DLP
* projector don't work reliably with 400kHz */
@@ -486,10 +432,7 @@ static void __init omap3_beagle_init_early(void)
static void __init omap3_beagle_init_irq(void)
{
- omap_init_irq();
-#ifdef CONFIG_OMAP_32K_TIMER
- omap2_gp_clockevent_set_gptimer(12);
-#endif
+ omap3_init_irq();
}
static struct platform_device *omap3_beagle_devices[] __initdata = {
@@ -599,5 +542,5 @@ MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
.init_early = omap3_beagle_init_early,
.init_irq = omap3_beagle_init_irq,
.init_machine = omap3_beagle_init,
- .timer = &omap_timer,
+ .timer = &omap3_secure_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index b4d43464a303..c452b3f3331a 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -273,12 +273,12 @@ static struct omap_dss_board_info omap3_evm_dss_data = {
.default_device = &omap3_evm_lcd_device,
};
-static struct regulator_consumer_supply omap3evm_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply omap3evm_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply omap3evm_vsim_supply = {
- .supply = "vmmc_aux",
+static struct regulator_consumer_supply omap3evm_vsim_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -292,8 +292,8 @@ static struct regulator_init_data omap3evm_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3evm_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap3evm_vmmc1_supply),
+ .consumer_supplies = omap3evm_vmmc1_supply,
};
/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -307,8 +307,8 @@ static struct regulator_init_data omap3evm_vsim = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3evm_vsim_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap3evm_vsim_supply),
+ .consumer_supplies = omap3evm_vsim_supply,
};
static struct omap2_hsmmc_info mmc[] = {
@@ -365,10 +365,6 @@ static int omap3evm_twl_gpio_setup(struct device *dev,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters */
- omap3evm_vmmc1_supply.dev = mmc[0].dev;
- omap3evm_vsim_supply.dev = mmc[0].dev;
-
/*
* Most GPIOs are for USB OTG. Some are mostly sent to
* the P2 connector; notably LEDA for the LCD backlight.
@@ -400,10 +396,6 @@ static struct twl4030_gpio_platform_data omap3evm_gpio_data = {
.setup = omap3evm_twl_gpio_setup,
};
-static struct twl4030_usb_data omap3evm_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
static uint32_t board_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_DOWN),
@@ -438,58 +430,10 @@ static struct twl4030_keypad_data omap3evm_kp_data = {
.rep = 1,
};
-static struct twl4030_madc_platform_data omap3evm_madc_data = {
- .irq_line = 1,
-};
-
-static struct twl4030_codec_audio_data omap3evm_audio_data;
-
-static struct twl4030_codec_data omap3evm_codec_data = {
- .audio_mclk = 26000000,
- .audio = &omap3evm_audio_data,
-};
-
-static struct regulator_consumer_supply omap3_evm_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-/* VDAC for DSS driving S-Video */
-static struct regulator_init_data omap3_evm_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3_evm_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply omap3_evm_vpll2_supplies[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
-static struct regulator_init_data omap3_evm_vpll2 = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3_evm_vpll2_supplies),
- .consumer_supplies = omap3_evm_vpll2_supplies,
-};
-
/* ads7846 on SPI */
-static struct regulator_consumer_supply omap3evm_vio_supply =
- REGULATOR_SUPPLY("vcc", "spi1.0");
+static struct regulator_consumer_supply omap3evm_vio_supply[] = {
+ REGULATOR_SUPPLY("vcc", "spi1.0"),
+};
/* VIO for ads7846 */
static struct regulator_init_data omap3evm_vio = {
@@ -502,8 +446,8 @@ static struct regulator_init_data omap3evm_vio = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3evm_vio_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap3evm_vio_supply),
+ .consumer_supplies = omap3evm_vio_supply,
};
#ifdef CONFIG_WL12XX_PLATFORM_DATA
@@ -511,16 +455,17 @@ static struct regulator_init_data omap3evm_vio = {
#define OMAP3EVM_WLAN_PMENA_GPIO (150)
#define OMAP3EVM_WLAN_IRQ_GPIO (149)
-static struct regulator_consumer_supply omap3evm_vmmc2_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+static struct regulator_consumer_supply omap3evm_vmmc2_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
+};
/* VMMC2 for driving the WL12xx module */
static struct regulator_init_data omap3evm_vmmc2 = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3evm_vmmc2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap3evm_vmmc2_supply),
+ .consumer_supplies = omap3evm_vmmc2_supply,
};
static struct fixed_voltage_config omap3evm_vwlan = {
@@ -548,17 +493,9 @@ struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
#endif
static struct twl4030_platform_data omap3evm_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
.keypad = &omap3evm_kp_data,
- .madc = &omap3evm_madc_data,
- .usb = &omap3evm_usb_data,
.gpio = &omap3evm_gpio_data,
- .codec = &omap3evm_codec_data,
- .vdac = &omap3_evm_vdac,
- .vpll2 = &omap3_evm_vpll2,
.vio = &omap3evm_vio,
.vmmc1 = &omap3evm_vmmc1,
.vsim = &omap3evm_vsim,
@@ -566,6 +503,14 @@ static struct twl4030_platform_data omap3evm_twldata = {
static int __init omap3_evm_i2c_init(void)
{
+ omap3_pmic_get_config(&omap3evm_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC |
+ TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+ omap3evm_twldata.vdac->constraints.apply_uV = true;
+ omap3evm_twldata.vpll2->constraints.apply_uV = true;
+
omap3_pmic_init("twl4030", &omap3evm_twldata);
omap_register_i2c_bus(2, 400, NULL, 0);
omap_register_i2c_bus(3, 400, NULL, 0);
@@ -740,7 +685,7 @@ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap3_evm_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap3_evm_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 60d9be49dbab..703aeb5b8fd4 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -35,7 +35,6 @@
#include "mux.h"
#include "hsmmc.h"
-#include "timer-gp.h"
#include "control.h"
#include "common-board-devices.h"
@@ -55,8 +54,8 @@
#define OMAP3_TORPEDO_MMC_GPIO_CD 127
#define OMAP3_TORPEDO_SMSC911X_GPIO_IRQ 129
-static struct regulator_consumer_supply omap3logic_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply omap3logic_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -71,8 +70,8 @@ static struct regulator_init_data omap3logic_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3logic_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap3logic_vmmc1_supply),
+ .consumer_supplies = omap3logic_vmmc1_supply,
};
static struct twl4030_gpio_platform_data omap3logic_gpio_data = {
@@ -130,8 +129,6 @@ static void __init board_mmc_init(void)
}
omap2_hsmmc_init(board_mmc_info);
- /* link regulators to MMC adapters */
- omap3logic_vmmc1_supply.dev = board_mmc_info[0].dev;
}
static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
@@ -215,16 +212,16 @@ MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
.boot_params = 0x80000100,
.map_io = omap3_map_io,
.init_early = omap3logic_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap3logic_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
.boot_params = 0x80000100,
.map_io = omap3_map_io,
.init_early = omap3logic_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap3logic_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 23f71d40883e..080d7bd6795e 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -320,17 +320,17 @@ static struct twl4030_gpio_platform_data omap3pandora_gpio_data = {
.setup = omap3pandora_twl_gpio_setup,
};
-static struct regulator_consumer_supply pandora_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
-
-static struct regulator_consumer_supply pandora_vmmc2_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+static struct regulator_consumer_supply pandora_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
-static struct regulator_consumer_supply pandora_vmmc3_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2");
+static struct regulator_consumer_supply pandora_vmmc2_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1")
+};
-static struct regulator_consumer_supply pandora_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
+static struct regulator_consumer_supply pandora_vmmc3_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
+};
static struct regulator_consumer_supply pandora_vdds_supplies[] = {
REGULATOR_SUPPLY("vdds_sdi", "omapdss"),
@@ -338,11 +338,13 @@ static struct regulator_consumer_supply pandora_vdds_supplies[] = {
REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
};
-static struct regulator_consumer_supply pandora_vcc_lcd_supply =
- REGULATOR_SUPPLY("vcc", "display0");
+static struct regulator_consumer_supply pandora_vcc_lcd_supply[] = {
+ REGULATOR_SUPPLY("vcc", "display0"),
+};
-static struct regulator_consumer_supply pandora_usb_phy_supply =
- REGULATOR_SUPPLY("hsusb0", "ehci-omap.0");
+static struct regulator_consumer_supply pandora_usb_phy_supply[] = {
+ REGULATOR_SUPPLY("hsusb0", "ehci-omap.0"),
+};
/* ads7846 on SPI and 2 nub controllers on I2C */
static struct regulator_consumer_supply pandora_vaux4_supplies[] = {
@@ -351,8 +353,9 @@ static struct regulator_consumer_supply pandora_vaux4_supplies[] = {
REGULATOR_SUPPLY("vcc", "3-0067"),
};
-static struct regulator_consumer_supply pandora_adac_supply =
- REGULATOR_SUPPLY("vcc", "soc-audio");
+static struct regulator_consumer_supply pandora_adac_supply[] = {
+ REGULATOR_SUPPLY("vcc", "soc-audio"),
+};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data pandora_vmmc1 = {
@@ -365,8 +368,8 @@ static struct regulator_init_data pandora_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &pandora_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc1_supply),
+ .consumer_supplies = pandora_vmmc1_supply,
};
/* VMMC2 for MMC2 pins CMD, CLK, DAT0..DAT3 (max 100 mA) */
@@ -380,38 +383,8 @@ static struct regulator_init_data pandora_vmmc2 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &pandora_vmmc2_supply,
-};
-
-/* VDAC for DSS driving S-Video */
-static struct regulator_init_data pandora_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &pandora_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data pandora_vpll2 = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(pandora_vdds_supplies),
- .consumer_supplies = pandora_vdds_supplies,
+ .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc2_supply),
+ .consumer_supplies = pandora_vmmc2_supply,
};
/* VAUX1 for LCD */
@@ -425,8 +398,8 @@ static struct regulator_init_data pandora_vaux1 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &pandora_vcc_lcd_supply,
+ .num_consumer_supplies = ARRAY_SIZE(pandora_vcc_lcd_supply),
+ .consumer_supplies = pandora_vcc_lcd_supply,
};
/* VAUX2 for USB host PHY */
@@ -440,8 +413,8 @@ static struct regulator_init_data pandora_vaux2 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &pandora_usb_phy_supply,
+ .num_consumer_supplies = ARRAY_SIZE(pandora_usb_phy_supply),
+ .consumer_supplies = pandora_usb_phy_supply,
};
/* VAUX4 for ads7846 and nubs */
@@ -470,8 +443,8 @@ static struct regulator_init_data pandora_vsim = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &pandora_adac_supply,
+ .num_consumer_supplies = ARRAY_SIZE(pandora_adac_supply),
+ .consumer_supplies = pandora_adac_supply,
};
/* Fixed regulator internal to Wifi module */
@@ -479,8 +452,8 @@ static struct regulator_init_data pandora_vmmc3 = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &pandora_vmmc3_supply,
+ .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc3_supply),
+ .consumer_supplies = pandora_vmmc3_supply,
};
static struct fixed_voltage_config pandora_vwlan = {
@@ -501,29 +474,12 @@ static struct platform_device pandora_vwlan_device = {
},
};
-static struct twl4030_usb_data omap3pandora_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_codec_audio_data omap3pandora_audio_data;
-
-static struct twl4030_codec_data omap3pandora_codec_data = {
- .audio_mclk = 26000000,
- .audio = &omap3pandora_audio_data,
-};
-
static struct twl4030_bci_platform_data pandora_bci_data;
static struct twl4030_platform_data omap3pandora_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
.gpio = &omap3pandora_gpio_data,
- .usb = &omap3pandora_usb_data,
- .codec = &omap3pandora_codec_data,
.vmmc1 = &pandora_vmmc1,
.vmmc2 = &pandora_vmmc2,
- .vdac = &pandora_vdac,
- .vpll2 = &pandora_vpll2,
.vaux1 = &pandora_vaux1,
.vaux2 = &pandora_vaux2,
.vaux4 = &pandora_vaux4,
@@ -541,6 +497,17 @@ static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
static int __init omap3pandora_i2c_init(void)
{
+ omap3_pmic_get_config(&omap3pandora_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+ omap3pandora_twldata.vdac->constraints.apply_uV = true;
+
+ omap3pandora_twldata.vpll2->constraints.apply_uV = true;
+ omap3pandora_twldata.vpll2->num_consumer_supplies =
+ ARRAY_SIZE(pandora_vdds_supplies);
+ omap3pandora_twldata.vpll2->consumer_supplies = pandora_vdds_supplies;
+
omap3_pmic_init("tps65950", &omap3pandora_twldata);
/* i2c2 pins are not connected */
omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo,
@@ -643,7 +610,7 @@ MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap3pandora_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap3pandora_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 0c108a212ea2..8e104980ea26 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -52,7 +52,6 @@
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mux.h"
#include "hsmmc.h"
-#include "timer-gp.h"
#include "common-board-devices.h"
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
@@ -206,12 +205,12 @@ static struct omap_dss_board_info omap3_stalker_dss_data = {
.default_device = &omap3_stalker_dvi_device,
};
-static struct regulator_consumer_supply omap3stalker_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply omap3stalker_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply omap3stalker_vsim_supply = {
- .supply = "vmmc_aux",
+static struct regulator_consumer_supply omap3stalker_vsim_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -224,8 +223,8 @@ static struct regulator_init_data omap3stalker_vmmc1 = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
| REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3stalker_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap3stalker_vmmc1_supply),
+ .consumer_supplies = omap3stalker_vmmc1_supply,
};
/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -238,8 +237,8 @@ static struct regulator_init_data omap3stalker_vsim = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
| REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3stalker_vsim_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap3stalker_vsim_supply),
+ .consumer_supplies = omap3stalker_vsim_supply,
};
static struct omap2_hsmmc_info mmc[] = {
@@ -321,10 +320,6 @@ omap3stalker_twl_gpio_setup(struct device *dev,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters */
- omap3stalker_vmmc1_supply.dev = mmc[0].dev;
- omap3stalker_vsim_supply.dev = mmc[0].dev;
-
/*
* Most GPIOs are for USB OTG. Some are mostly sent to
* the P2 connector; notably LEDA for the LCD backlight.
@@ -354,10 +349,6 @@ static struct twl4030_gpio_platform_data omap3stalker_gpio_data = {
.setup = omap3stalker_twl_gpio_setup,
};
-static struct twl4030_usb_data omap3stalker_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
static uint32_t board_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_DOWN),
@@ -392,68 +383,10 @@ static struct twl4030_keypad_data omap3stalker_kp_data = {
.rep = 1,
};
-static struct twl4030_madc_platform_data omap3stalker_madc_data = {
- .irq_line = 1,
-};
-
-static struct twl4030_codec_audio_data omap3stalker_audio_data;
-
-static struct twl4030_codec_data omap3stalker_codec_data = {
- .audio_mclk = 26000000,
- .audio = &omap3stalker_audio_data,
-};
-
-static struct regulator_consumer_supply omap3_stalker_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-/* VDAC for DSS driving S-Video */
-static struct regulator_init_data omap3_stalker_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap3_stalker_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_consumer_supply omap3_stalker_vpll2_supplies[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
-static struct regulator_init_data omap3_stalker_vpll2 = {
- .constraints = {
- .name = "VDVI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(omap3_stalker_vpll2_supplies),
- .consumer_supplies = omap3_stalker_vpll2_supplies,
-};
-
static struct twl4030_platform_data omap3stalker_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
.keypad = &omap3stalker_kp_data,
- .madc = &omap3stalker_madc_data,
- .usb = &omap3stalker_usb_data,
.gpio = &omap3stalker_gpio_data,
- .codec = &omap3stalker_codec_data,
- .vdac = &omap3_stalker_vdac,
- .vpll2 = &omap3_stalker_vpll2,
.vmmc1 = &omap3stalker_vmmc1,
.vsim = &omap3stalker_vsim,
};
@@ -474,6 +407,15 @@ static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = {
static int __init omap3_stalker_i2c_init(void)
{
+ omap3_pmic_get_config(&omap3stalker_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC |
+ TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+ omap3stalker_twldata.vdac->constraints.apply_uV = true;
+ omap3stalker_twldata.vpll2->constraints.apply_uV = true;
+ omap3stalker_twldata.vpll2->constraints.name = "VDVI";
+
omap3_pmic_init("twl4030", &omap3stalker_twldata);
omap_register_i2c_bus(2, 400, NULL, 0);
omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3,
@@ -494,10 +436,7 @@ static void __init omap3_stalker_init_early(void)
static void __init omap3_stalker_init_irq(void)
{
- omap_init_irq();
-#ifdef CONFIG_OMAP_32K_TIMER
- omap2_gp_clockevent_set_gptimer(12);
-#endif
+ omap3_init_irq();
}
static struct platform_device *omap3_stalker_devices[] __initdata = {
@@ -560,5 +499,5 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
.init_early = omap3_stalker_init_early,
.init_irq = omap3_stalker_init_irq,
.init_machine = omap3_stalker_init,
- .timer = &omap_timer,
+ .timer = &omap3_secure_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 5f649faf7377..852ea0464057 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -51,7 +51,6 @@
#include "mux.h"
#include "hsmmc.h"
-#include "timer-gp.h"
#include "common-board-devices.h"
#include <asm/setup.h>
@@ -114,12 +113,12 @@ static struct omap_lcd_config omap3_touchbook_lcd_config __initdata = {
.ctrl_name = "internal",
};
-static struct regulator_consumer_supply touchbook_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply touchbook_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply touchbook_vsim_supply = {
- .supply = "vmmc_aux",
+static struct regulator_consumer_supply touchbook_vsim_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};
static struct gpio_led gpio_leds[];
@@ -137,10 +136,6 @@ static int touchbook_twl_gpio_setup(struct device *dev,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters */
- touchbook_vmmc1_supply.dev = mmc[0].dev;
- touchbook_vsim_supply.dev = mmc[0].dev;
-
/* REVISIT: need ehci-omap hooks for external VBUS
* power switch and overcurrent detect
*/
@@ -167,14 +162,18 @@ static struct twl4030_gpio_platform_data touchbook_gpio_data = {
.setup = touchbook_twl_gpio_setup,
};
-static struct regulator_consumer_supply touchbook_vdac_supply = {
+static struct regulator_consumer_supply touchbook_vdac_supply[] = {
+{
.supply = "vdac",
.dev = &omap3_touchbook_lcd_device.dev,
+},
};
-static struct regulator_consumer_supply touchbook_vdvi_supply = {
+static struct regulator_consumer_supply touchbook_vdvi_supply[] = {
+{
.supply = "vdvi",
.dev = &omap3_touchbook_lcd_device.dev,
+},
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
@@ -188,8 +187,8 @@ static struct regulator_init_data touchbook_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &touchbook_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(touchbook_vmmc1_supply),
+ .consumer_supplies = touchbook_vmmc1_supply,
};
/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
@@ -203,62 +202,15 @@ static struct regulator_init_data touchbook_vsim = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &touchbook_vsim_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data touchbook_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &touchbook_vdac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data touchbook_vpll2 = {
- .constraints = {
- .name = "VDVI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &touchbook_vdvi_supply,
-};
-
-static struct twl4030_usb_data touchbook_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
-static struct twl4030_codec_audio_data touchbook_audio_data;
-
-static struct twl4030_codec_data touchbook_codec_data = {
- .audio_mclk = 26000000,
- .audio = &touchbook_audio_data,
+ .num_consumer_supplies = ARRAY_SIZE(touchbook_vsim_supply),
+ .consumer_supplies = touchbook_vsim_supply,
};
static struct twl4030_platform_data touchbook_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
- .usb = &touchbook_usb_data,
.gpio = &touchbook_gpio_data,
- .codec = &touchbook_codec_data,
.vmmc1 = &touchbook_vmmc1,
.vsim = &touchbook_vsim,
- .vdac = &touchbook_vdac,
- .vpll2 = &touchbook_vpll2,
};
static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
@@ -270,8 +222,20 @@ static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
static int __init omap3_touchbook_i2c_init(void)
{
/* Standard TouchBook bus */
- omap3_pmic_init("twl4030", &touchbook_twldata);
+ omap3_pmic_get_config(&touchbook_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+ touchbook_twldata.vdac->num_consumer_supplies =
+ ARRAY_SIZE(touchbook_vdac_supply);
+ touchbook_twldata.vdac->consumer_supplies = touchbook_vdac_supply;
+ touchbook_twldata.vpll2->constraints.name = "VDVI";
+ touchbook_twldata.vpll2->num_consumer_supplies =
+ ARRAY_SIZE(touchbook_vdvi_supply);
+ touchbook_twldata.vpll2->consumer_supplies = touchbook_vdvi_supply;
+
+ omap3_pmic_init("twl4030", &touchbook_twldata);
/* Additional TouchBook bus */
omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo,
ARRAY_SIZE(touchBook_i2c_boardinfo));
@@ -371,10 +335,7 @@ static void __init omap3_touchbook_init_early(void)
static void __init omap3_touchbook_init_irq(void)
{
- omap_init_irq();
-#ifdef CONFIG_OMAP_32K_TIMER
- omap2_gp_clockevent_set_gptimer(12);
-#endif
+ omap3_init_irq();
}
static struct platform_device *omap3_touchbook_devices[] __initdata = {
@@ -449,5 +410,5 @@ MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
.init_early = omap3_touchbook_init_early,
.init_irq = omap3_touchbook_init_irq,
.init_machine = omap3_touchbook_init,
- .timer = &omap_timer,
+ .timer = &omap3_secure_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 0cfe2005cb50..9aaa96057666 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -41,7 +41,6 @@
#include <plat/usb.h>
#include <plat/mmc.h>
#include <video/omap-panel-generic-dpi.h>
-#include "timer-gp.h"
#include "hsmmc.h"
#include "control.h"
@@ -155,14 +154,6 @@ static struct omap_musb_board_data musb_board_data = {
.power = 100,
};
-static struct twl4030_usb_data omap4_usbphy_data = {
- .phy_init = omap4430_phy_init,
- .phy_exit = omap4430_phy_exit,
- .phy_power = omap4430_phy_power,
- .phy_set_clock = omap4430_phy_set_clk,
- .phy_suspend = omap4430_phy_suspend,
-};
-
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
@@ -182,24 +173,16 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
-static struct regulator_consumer_supply omap4_panda_vmmc_supply[] = {
- {
- .supply = "vmmc",
- .dev_name = "omap_hsmmc.0",
- },
-};
-
-static struct regulator_consumer_supply omap4_panda_vmmc5_supply = {
- .supply = "vmmc",
- .dev_name = "omap_hsmmc.4",
+static struct regulator_consumer_supply omap4_panda_vmmc5_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.4"),
};
static struct regulator_init_data panda_vmmc5 = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &omap4_panda_vmmc5_supply,
+ .num_consumer_supplies = ARRAY_SIZE(omap4_panda_vmmc5_supply),
+ .consumer_supplies = omap4_panda_vmmc5_supply,
};
static struct fixed_voltage_config panda_vwlan = {
@@ -274,128 +257,8 @@ static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
return 0;
}
-static struct regulator_init_data omap4_panda_vaux2 = {
- .constraints = {
- .min_uV = 1200000,
- .max_uV = 2800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data omap4_panda_vaux3 = {
- .constraints = {
- .min_uV = 1000000,
- .max_uV = 3000000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-/* VMMC1 for MMC1 card */
-static struct regulator_init_data omap4_panda_vmmc = {
- .constraints = {
- .min_uV = 1200000,
- .max_uV = 3000000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = omap4_panda_vmmc_supply,
-};
-
-static struct regulator_init_data omap4_panda_vpp = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 2500000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
- | REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data omap4_panda_vana = {
- .constraints = {
- .min_uV = 2100000,
- .max_uV = 2100000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data omap4_panda_vcxio = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data omap4_panda_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data omap4_panda_vusb = {
- .constraints = {
- .min_uV = 3300000,
- .max_uV = 3300000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct regulator_init_data omap4_panda_clk32kg = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct twl4030_platform_data omap4_panda_twldata = {
- .irq_base = TWL6030_IRQ_BASE,
- .irq_end = TWL6030_IRQ_END,
-
- /* Regulators */
- .vmmc = &omap4_panda_vmmc,
- .vpp = &omap4_panda_vpp,
- .vana = &omap4_panda_vana,
- .vcxio = &omap4_panda_vcxio,
- .vdac = &omap4_panda_vdac,
- .vusb = &omap4_panda_vusb,
- .vaux2 = &omap4_panda_vaux2,
- .vaux3 = &omap4_panda_vaux3,
- .clk32kg = &omap4_panda_clk32kg,
- .usb = &omap4_usbphy_data,
-};
+/* Panda board uses the common PMIC configuration */
+static struct twl4030_platform_data omap4_panda_twldata;
/*
* Display monitor features are burnt in their EEPROM as EDID data. The EEPROM
@@ -409,6 +272,16 @@ static struct i2c_board_info __initdata panda_i2c_eeprom[] = {
static int __init omap4_panda_i2c_init(void)
{
+ omap4_pmic_get_config(&omap4_panda_twldata, TWL_COMMON_PDATA_USB,
+ TWL_COMMON_REGULATOR_VDAC |
+ TWL_COMMON_REGULATOR_VAUX2 |
+ TWL_COMMON_REGULATOR_VAUX3 |
+ TWL_COMMON_REGULATOR_VMMC |
+ TWL_COMMON_REGULATOR_VPP |
+ TWL_COMMON_REGULATOR_VANA |
+ TWL_COMMON_REGULATOR_VCXIO |
+ TWL_COMMON_REGULATOR_VUSB |
+ TWL_COMMON_REGULATOR_CLK32KG);
omap4_pmic_init("twl6030", &omap4_panda_twldata);
omap_register_i2c_bus(2, 400, NULL, 0);
/*
@@ -716,5 +589,5 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
.init_early = omap4_panda_init_early,
.init_irq = gic_init_irq,
.init_machine = omap4_panda_init,
- .timer = &omap_timer,
+ .timer = &omap4_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 175e1ab2b04d..f1f18d03d24c 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -74,15 +74,16 @@
defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
/* fixed regulator for ads7846 */
-static struct regulator_consumer_supply ads7846_supply =
- REGULATOR_SUPPLY("vcc", "spi1.0");
+static struct regulator_consumer_supply ads7846_supply[] = {
+ REGULATOR_SUPPLY("vcc", "spi1.0"),
+};
static struct regulator_init_data vads7846_regulator = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &ads7846_supply,
+ .num_consumer_supplies = ARRAY_SIZE(ads7846_supply),
+ .consumer_supplies = ads7846_supply,
};
static struct fixed_voltage_config vads7846 = {
@@ -264,14 +265,6 @@ static struct omap_dss_board_info overo_dss_data = {
.default_device = &overo_dvi_device,
};
-static struct regulator_consumer_supply overo_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-static struct regulator_consumer_supply overo_vdds_dsi_supply[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
static struct mtd_partition overo_nand_partitions[] = {
{
.name = "xloader",
@@ -319,8 +312,8 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
-static struct regulator_consumer_supply overo_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply overo_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
@@ -415,8 +408,6 @@ static int overo_twl_gpio_setup(struct device *dev,
{
omap2_hsmmc_init(mmc);
- overo_vmmc1_supply.dev = mmc[0].dev;
-
#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -433,10 +424,6 @@ static struct twl4030_gpio_platform_data overo_gpio_data = {
.setup = overo_twl_gpio_setup,
};
-static struct twl4030_usb_data overo_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
static struct regulator_init_data overo_vmmc1 = {
.constraints = {
.min_uV = 1850000,
@@ -447,59 +434,23 @@ static struct regulator_init_data overo_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &overo_vmmc1_supply,
-};
-
-/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
-static struct regulator_init_data overo_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &overo_vdda_dac_supply,
-};
-
-/* VPLL2 for digital video outputs */
-static struct regulator_init_data overo_vpll2 = {
- .constraints = {
- .name = "VDVI",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(overo_vdds_dsi_supply),
- .consumer_supplies = overo_vdds_dsi_supply,
-};
-
-static struct twl4030_codec_audio_data overo_audio_data;
-
-static struct twl4030_codec_data overo_codec_data = {
- .audio_mclk = 26000000,
- .audio = &overo_audio_data,
+ .num_consumer_supplies = ARRAY_SIZE(overo_vmmc1_supply),
+ .consumer_supplies = overo_vmmc1_supply,
};
static struct twl4030_platform_data overo_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
.gpio = &overo_gpio_data,
- .usb = &overo_usb_data,
- .codec = &overo_codec_data,
.vmmc1 = &overo_vmmc1,
- .vdac = &overo_vdac,
- .vpll2 = &overo_vpll2,
};
static int __init overo_i2c_init(void)
{
+ omap3_pmic_get_config(&overo_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
+ overo_twldata.vpll2->constraints.name = "VDVI";
+
omap3_pmic_init("tps65950", &overo_twldata);
/* i2c2 pins are used for gpio */
omap_register_i2c_bus(3, 400, NULL, 0);
@@ -615,7 +566,7 @@ MACHINE_START(OVERO, "Gumstix Overo")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = overo_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = overo_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index 42d10b12da3c..7dfed24ee12e 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -79,20 +79,14 @@ static struct twl4030_gpio_platform_data rm680_gpio_data = {
.pulldowns = BIT(1) | BIT(2) | BIT(8) | BIT(15),
};
-static struct twl4030_usb_data rm680_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
static struct twl4030_platform_data rm680_twl_data = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
.gpio = &rm680_gpio_data,
- .usb = &rm680_usb_data,
/* add rest of the children here */
};
static void __init rm680_i2c_init(void)
{
+ omap3_pmic_get_config(&rm680_twl_data, TWL_COMMON_PDATA_USB, 0);
omap_pmic_init(1, 2900, "twl5031", INT_34XX_SYS_NIRQ, &rm680_twl_data);
omap_register_i2c_bus(2, 400, NULL, 0);
omap_register_i2c_bus(3, 400, NULL, 0);
@@ -163,7 +157,7 @@ MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
.reserve = omap_reserve,
.map_io = rm680_map_io,
.init_early = rm680_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = rm680_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 88bd6f7705f0..bdb24db36004 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -288,10 +288,6 @@ static struct twl4030_keypad_data rx51_kp_data = {
.rep = 1,
};
-static struct twl4030_madc_platform_data rx51_madc_data = {
- .irq_line = 1,
-};
-
/* Enable input logic and pull all lines up when eMMC is on. */
static struct omap_board_mux rx51_mmc2_on_mux[] = {
OMAP3_MUX(SDMMC2_CMD, OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
@@ -358,14 +354,17 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
{} /* Terminator */
};
-static struct regulator_consumer_supply rx51_vmmc1_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
+static struct regulator_consumer_supply rx51_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
-static struct regulator_consumer_supply rx51_vaux3_supply =
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
+static struct regulator_consumer_supply rx51_vaux3_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
+};
-static struct regulator_consumer_supply rx51_vsim_supply =
- REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
+static struct regulator_consumer_supply rx51_vsim_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"),
+};
static struct regulator_consumer_supply rx51_vmmc2_supplies[] = {
/* tlv320aic3x analog supplies */
@@ -395,10 +394,6 @@ static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
REGULATOR_SUPPLY("vdd", "2-0063"),
};
-static struct regulator_consumer_supply rx51_vdac_supply[] = {
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"),
-};
-
static struct regulator_init_data rx51_vaux1 = {
.constraints = {
.name = "V28",
@@ -452,8 +447,8 @@ static struct regulator_init_data rx51_vaux3_mmc = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &rx51_vaux3_supply,
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vaux3_supply),
+ .consumer_supplies = rx51_vaux3_supply,
};
static struct regulator_init_data rx51_vaux4 = {
@@ -479,8 +474,8 @@ static struct regulator_init_data rx51_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &rx51_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vmmc1_supply),
+ .consumer_supplies = rx51_vmmc1_supply,
};
static struct regulator_init_data rx51_vmmc2 = {
@@ -511,23 +506,8 @@ static struct regulator_init_data rx51_vsim = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &rx51_vsim_supply,
-};
-
-static struct regulator_init_data rx51_vdac = {
- .constraints = {
- .name = "VDAC",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .apply_uV = true,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = rx51_vdac_supply,
+ .num_consumer_supplies = ARRAY_SIZE(rx51_vsim_supply),
+ .consumer_supplies = rx51_vsim_supply,
};
static struct regulator_init_data rx51_vio = {
@@ -600,10 +580,6 @@ static struct twl4030_gpio_platform_data rx51_gpio_data = {
.setup = rx51_twlgpio_setup,
};
-static struct twl4030_usb_data rx51_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
static struct twl4030_ins sleep_on_seq[] __initdata = {
/*
* Turn off everything
@@ -775,14 +751,9 @@ struct twl4030_codec_data rx51_codec_data __initdata = {
};
static struct twl4030_platform_data rx51_twldata __initdata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
.gpio = &rx51_gpio_data,
.keypad = &rx51_kp_data,
- .madc = &rx51_madc_data,
- .usb = &rx51_usb_data,
.power = &rx51_t2scripts_data,
.codec = &rx51_codec_data,
@@ -791,7 +762,6 @@ static struct twl4030_platform_data rx51_twldata __initdata = {
.vaux4 = &rx51_vaux4,
.vmmc1 = &rx51_vmmc1,
.vsim = &rx51_vsim,
- .vdac = &rx51_vdac,
.vio = &rx51_vio,
};
@@ -847,6 +817,13 @@ static int __init rx51_i2c_init(void)
rx51_twldata.vaux3 = &rx51_vaux3_cam;
}
rx51_twldata.vmmc2 = &rx51_vmmc2;
+ omap3_pmic_get_config(&rx51_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC,
+ TWL_COMMON_REGULATOR_VDAC);
+
+ rx51_twldata.vdac->constraints.apply_uV = true;
+ rx51_twldata.vdac->constraints.name = "VDAC";
+
omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index fec4cac8fa0a..5ea142f9bc97 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -160,7 +160,7 @@ MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
.reserve = rx51_reserve,
.map_io = rx51_map_io,
.init_early = rx51_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = rx51_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
index 09fa7bfff8d6..a85d5b0b11da 100644
--- a/arch/arm/mach-omap2/board-ti8168evm.c
+++ b/arch/arm/mach-omap2/board-ti8168evm.c
@@ -33,11 +33,6 @@ static void __init ti8168_init_early(void)
omap2_init_common_devices(NULL, NULL);
}
-static void __init ti8168_evm_init_irq(void)
-{
- omap_init_irq();
-}
-
static void __init ti8168_evm_init(void)
{
omap_serial_init();
@@ -56,7 +51,7 @@ MACHINE_START(TI8168EVM, "ti8168evm")
.boot_params = 0x80000100,
.map_io = ti8168_evm_map_io,
.init_early = ti8168_init_early,
- .init_irq = ti8168_evm_init_irq,
- .timer = &omap_timer,
+ .init_irq = ti816x_init_irq,
+ .timer = &omap3_timer,
.init_machine = ti8168_evm_init,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 118c6f53c5eb..13a644233667 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -105,21 +105,20 @@ static struct twl4030_keypad_data zoom_kp_twl4030_data = {
.rep = 1,
};
-static struct regulator_consumer_supply zoom_vmmc1_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply zoom_vmmc1_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply zoom_vsim_supply = {
- .supply = "vmmc_aux",
+static struct regulator_consumer_supply zoom_vsim_supply[] = {
+ REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};
-static struct regulator_consumer_supply zoom_vmmc2_supply = {
- .supply = "vmmc",
+static struct regulator_consumer_supply zoom_vmmc2_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
};
-static struct regulator_consumer_supply zoom_vmmc3_supply = {
- .supply = "vmmc",
- .dev_name = "omap_hsmmc.2",
+static struct regulator_consumer_supply zoom_vmmc3_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
};
/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
@@ -133,8 +132,8 @@ static struct regulator_init_data zoom_vmmc1 = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &zoom_vmmc1_supply,
+ .num_consumer_supplies = ARRAY_SIZE(zoom_vmmc1_supply),
+ .consumer_supplies = zoom_vmmc1_supply,
};
/* VMMC2 for MMC2 card */
@@ -148,8 +147,8 @@ static struct regulator_init_data zoom_vmmc2 = {
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &zoom_vmmc2_supply,
+ .num_consumer_supplies = ARRAY_SIZE(zoom_vmmc2_supply),
+ .consumer_supplies = zoom_vmmc2_supply,
};
/* VSIM for OMAP VDD_MMC1A (i/o for DAT4..DAT7) */
@@ -163,16 +162,16 @@ static struct regulator_init_data zoom_vsim = {
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &zoom_vsim_supply,
+ .num_consumer_supplies = ARRAY_SIZE(zoom_vsim_supply),
+ .consumer_supplies = zoom_vsim_supply,
};
static struct regulator_init_data zoom_vmmc3 = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &zoom_vmmc3_supply,
+ .num_consumer_supplies = ARRAY_SIZE(zoom_vmmc3_supply),
+ .consumer_supplies = zoom_vmmc3_supply,
};
static struct fixed_voltage_config zoom_vwlan = {
@@ -227,40 +226,6 @@ static struct omap2_hsmmc_info mmc[] = {
{} /* Terminator */
};
-static struct regulator_consumer_supply zoom_vpll2_supplies[] = {
- REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
- REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
-};
-
-static struct regulator_consumer_supply zoom_vdda_dac_supply =
- REGULATOR_SUPPLY("vdda_dac", "omapdss_venc");
-
-static struct regulator_init_data zoom_vpll2 = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(zoom_vpll2_supplies),
- .consumer_supplies = zoom_vpll2_supplies,
-};
-
-static struct regulator_init_data zoom_vdac = {
- .constraints = {
- .min_uV = 1800000,
- .max_uV = 1800000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL
- | REGULATOR_MODE_STANDBY,
- .valid_ops_mask = REGULATOR_CHANGE_MODE
- | REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &zoom_vdda_dac_supply,
-};
-
static int zoom_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
@@ -270,13 +235,6 @@ static int zoom_twl_gpio_setup(struct device *dev,
mmc[0].gpio_cd = gpio + 0;
omap2_hsmmc_init(mmc);
- /* link regulators to MMC adapters ... we "know" the
- * regulators will be set up only *after* we return.
- */
- zoom_vmmc1_supply.dev = mmc[0].dev;
- zoom_vsim_supply.dev = mmc[0].dev;
- zoom_vmmc2_supply.dev = mmc[1].dev;
-
ret = gpio_request_one(LCD_PANEL_ENABLE_GPIO, GPIOF_OUT_INIT_LOW,
"lcd enable");
if (ret)
@@ -292,26 +250,6 @@ static void zoom2_set_hs_extmute(int mute)
gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute);
}
-static int zoom_batt_table[] = {
-/* 0 C*/
-30800, 29500, 28300, 27100,
-26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
-17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
-11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310,
-8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830,
-5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170,
-4040, 3910, 3790, 3670, 3550
-};
-
-static struct twl4030_bci_platform_data zoom_bci_data = {
- .battery_tmp_tbl = zoom_batt_table,
- .tblsize = ARRAY_SIZE(zoom_batt_table),
-};
-
-static struct twl4030_usb_data zoom_usb_data = {
- .usb_mode = T2_USB_MODE_ULPI,
-};
-
static struct twl4030_gpio_platform_data zoom_gpio_data = {
.gpio_base = OMAP_MAX_GPIO_LINES,
.irq_base = TWL4030_GPIO_IRQ_BASE,
@@ -319,41 +257,29 @@ static struct twl4030_gpio_platform_data zoom_gpio_data = {
.setup = zoom_twl_gpio_setup,
};
-static struct twl4030_madc_platform_data zoom_madc_data = {
- .irq_line = 1,
-};
-
-static struct twl4030_codec_audio_data zoom_audio_data;
-
-static struct twl4030_codec_data zoom_codec_data = {
- .audio_mclk = 26000000,
- .audio = &zoom_audio_data,
-};
-
static struct twl4030_platform_data zoom_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
- .bci = &zoom_bci_data,
- .madc = &zoom_madc_data,
- .usb = &zoom_usb_data,
.gpio = &zoom_gpio_data,
.keypad = &zoom_kp_twl4030_data,
- .codec = &zoom_codec_data,
.vmmc1 = &zoom_vmmc1,
.vmmc2 = &zoom_vmmc2,
.vsim = &zoom_vsim,
- .vpll2 = &zoom_vpll2,
- .vdac = &zoom_vdac,
};
static int __init omap_i2c_init(void)
{
+ omap3_pmic_get_config(&zoom_twldata,
+ TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_BCI |
+ TWL_COMMON_PDATA_MADC | TWL_COMMON_PDATA_AUDIO,
+ TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
+
if (machine_is_omap_zoom2()) {
- zoom_audio_data.ramp_delay_value = 3; /* 161 ms */
- zoom_audio_data.hs_extmute = 1;
- zoom_audio_data.set_hs_extmute = zoom2_set_hs_extmute;
+ struct twl4030_codec_audio_data *audio_data;
+ audio_data = zoom_twldata.codec->audio;
+
+ audio_data->ramp_delay_value = 3; /* 161 ms */
+ audio_data->hs_extmute = 1;
+ audio_data->set_hs_extmute = zoom2_set_hs_extmute;
}
omap_pmic_init(1, 2400, "twl5030", INT_34XX_SYS_NIRQ, &zoom_twldata);
omap_register_i2c_bus(2, 400, NULL, 0);
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index 4b133d75c935..8a98c3c303fc 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -137,9 +137,9 @@ MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap_zoom_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap_zoom_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
@@ -147,7 +147,7 @@ MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap_zoom_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap3_init_irq,
.init_machine = omap_zoom_init,
- .timer = &omap_timer,
+ .timer = &omap3_timer,
MACHINE_END
diff --git a/arch/arm/mach-omap2/clock44xx.h b/arch/arm/mach-omap2/clock44xx.h
index 6be1095936db..7ceb870e7ab8 100644
--- a/arch/arm/mach-omap2/clock44xx.h
+++ b/arch/arm/mach-omap2/clock44xx.h
@@ -8,13 +8,6 @@
#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H
#define __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H
-/*
- * XXX Missing values for the OMAP4 DPLL_USB
- * XXX Missing min_multiplier values for all OMAP4 DPLLs
- */
-#define OMAP4430_MAX_DPLL_MULT 2047
-#define OMAP4430_MAX_DPLL_DIV 128
-
int omap4xxx_clk_init(void);
#endif
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 8c965671b4d4..044df38f65ce 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -53,9 +53,9 @@ static struct clk extalt_clkin_ck = {
static struct clk pad_clks_ck = {
.name = "pad_clks_ck",
.rate = 12000000,
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_CLKSEL_ABE,
- .enable_bit = OMAP4430_PAD_CLKS_GATE_SHIFT,
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_CLKSEL_ABE,
+ .enable_bit = OMAP4430_PAD_CLKS_GATE_SHIFT,
};
static struct clk pad_slimbus_core_clks_ck = {
@@ -73,9 +73,9 @@ static struct clk secure_32k_clk_src_ck = {
static struct clk slimbus_clk = {
.name = "slimbus_clk",
.rate = 12000000,
- .ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_CLKSEL_ABE,
- .enable_bit = OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_CLKSEL_ABE,
+ .enable_bit = OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
};
static struct clk sys_32k_ck = {
@@ -258,8 +258,8 @@ static struct dpll_data dpll_abe_dd = {
.enable_mask = OMAP4430_DPLL_EN_MASK,
.autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
.idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = OMAP4430_MAX_DPLL_MULT,
- .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .max_multiplier = 2047,
+ .max_divider = 128,
.min_divider = 1,
};
@@ -278,10 +278,10 @@ static struct clk dpll_abe_ck = {
static struct clk dpll_abe_x2_ck = {
.name = "dpll_abe_x2_ck",
.parent = &dpll_abe_ck,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_ABE,
.flags = CLOCK_CLKOUTX2,
.ops = &clkops_omap4_dpllmx_ops,
.recalc = &omap3_clkoutx2_recalc,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_ABE,
};
static const struct clksel_rate div31_1to31_rates[] = {
@@ -434,8 +434,8 @@ static struct dpll_data dpll_core_dd = {
.enable_mask = OMAP4430_DPLL_EN_MASK,
.autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
.idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = OMAP4430_MAX_DPLL_MULT,
- .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .max_multiplier = 2047,
+ .max_divider = 128,
.min_divider = 1,
};
@@ -622,11 +622,11 @@ static struct clk dpll_core_m3x2_ck = {
.clksel_reg = OMAP4430_CM_DIV_M3_DPLL_CORE,
.clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DIV_M3_DPLL_CORE,
- .enable_bit = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate,
+ .enable_reg = OMAP4430_CM_DIV_M3_DPLL_CORE,
+ .enable_bit = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
};
static struct clk dpll_core_m7x2_ck = {
@@ -672,8 +672,8 @@ static struct dpll_data dpll_iva_dd = {
.enable_mask = OMAP4430_DPLL_EN_MASK,
.autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
.idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = OMAP4430_MAX_DPLL_MULT,
- .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .max_multiplier = 2047,
+ .max_divider = 128,
.min_divider = 1,
};
@@ -740,8 +740,8 @@ static struct dpll_data dpll_mpu_dd = {
.enable_mask = OMAP4430_DPLL_EN_MASK,
.autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
.idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = OMAP4430_MAX_DPLL_MULT,
- .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .max_multiplier = 2047,
+ .max_divider = 128,
.min_divider = 1,
};
@@ -813,8 +813,8 @@ static struct dpll_data dpll_per_dd = {
.enable_mask = OMAP4430_DPLL_EN_MASK,
.autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
.idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = OMAP4430_MAX_DPLL_MULT,
- .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .max_multiplier = 2047,
+ .max_divider = 128,
.min_divider = 1,
};
@@ -850,10 +850,10 @@ static struct clk dpll_per_m2_ck = {
static struct clk dpll_per_x2_ck = {
.name = "dpll_per_x2_ck",
.parent = &dpll_per_ck,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_PER,
.flags = CLOCK_CLKOUTX2,
.ops = &clkops_omap4_dpllmx_ops,
.recalc = &omap3_clkoutx2_recalc,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_PER,
};
static const struct clksel dpll_per_m2x2_div[] = {
@@ -880,11 +880,11 @@ static struct clk dpll_per_m3x2_ck = {
.clksel_reg = OMAP4430_CM_DIV_M3_DPLL_PER,
.clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
.ops = &clkops_omap2_dflt,
- .enable_reg = OMAP4430_CM_DIV_M3_DPLL_PER,
- .enable_bit = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate,
+ .enable_reg = OMAP4430_CM_DIV_M3_DPLL_PER,
+ .enable_bit = OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
};
static struct clk dpll_per_m4x2_ck = {
@@ -935,63 +935,6 @@ static struct clk dpll_per_m7x2_ck = {
.set_rate = &omap2_clksel_set_rate,
};
-/* DPLL_UNIPRO */
-static struct dpll_data dpll_unipro_dd = {
- .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_UNIPRO,
- .clk_bypass = &sys_clkin_ck,
- .clk_ref = &sys_clkin_ck,
- .control_reg = OMAP4430_CM_CLKMODE_DPLL_UNIPRO,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO,
- .idlest_reg = OMAP4430_CM_IDLEST_DPLL_UNIPRO,
- .mult_mask = OMAP4430_DPLL_MULT_MASK,
- .div1_mask = OMAP4430_DPLL_DIV_MASK,
- .enable_mask = OMAP4430_DPLL_EN_MASK,
- .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
- .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .sddiv_mask = OMAP4430_DPLL_SD_DIV_MASK,
- .max_multiplier = OMAP4430_MAX_DPLL_MULT,
- .max_divider = OMAP4430_MAX_DPLL_DIV,
- .min_divider = 1,
-};
-
-
-static struct clk dpll_unipro_ck = {
- .name = "dpll_unipro_ck",
- .parent = &sys_clkin_ck,
- .dpll_data = &dpll_unipro_dd,
- .init = &omap2_init_dpll_parent,
- .ops = &clkops_omap3_noncore_dpll_ops,
- .recalc = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
-};
-
-static struct clk dpll_unipro_x2_ck = {
- .name = "dpll_unipro_x2_ck",
- .parent = &dpll_unipro_ck,
- .flags = CLOCK_CLKOUTX2,
- .ops = &clkops_null,
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel dpll_unipro_m2x2_div[] = {
- { .parent = &dpll_unipro_x2_ck, .rates = div31_1to31_rates },
- { .parent = NULL },
-};
-
-static struct clk dpll_unipro_m2x2_ck = {
- .name = "dpll_unipro_m2x2_ck",
- .parent = &dpll_unipro_x2_ck,
- .clksel = dpll_unipro_m2x2_div,
- .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_UNIPRO,
- .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
- .ops = &clkops_omap4_dpllmx_ops,
- .recalc = &omap2_clksel_recalc,
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap2_clksel_set_rate,
-};
-
static struct clk usb_hs_clk_div_ck = {
.name = "usb_hs_clk_div_ck",
.parent = &dpll_abe_m3x2_ck,
@@ -1015,8 +958,9 @@ static struct dpll_data dpll_usb_dd = {
.enable_mask = OMAP4430_DPLL_EN_MASK,
.autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
.idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
- .max_multiplier = OMAP4430_MAX_DPLL_MULT,
- .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .sddiv_mask = OMAP4430_DPLL_SD_DIV_MASK,
+ .max_multiplier = 4095,
+ .max_divider = 256,
.min_divider = 1,
};
@@ -1035,8 +979,8 @@ static struct clk dpll_usb_ck = {
static struct clk dpll_usb_clkdcoldo_ck = {
.name = "dpll_usb_clkdcoldo_ck",
.parent = &dpll_usb_ck,
- .ops = &clkops_omap4_dpllmx_ops,
.clksel_reg = OMAP4430_CM_CLKDCOLDO_DPLL_USB,
+ .ops = &clkops_omap4_dpllmx_ops,
.recalc = &followparent_recalc,
};
@@ -1169,19 +1113,6 @@ static struct clk func_96m_fclk = {
.set_rate = &omap2_clksel_set_rate,
};
-static const struct clksel hsmmc6_fclk_sel[] = {
- { .parent = &func_64m_fclk, .rates = div_1_0_rates },
- { .parent = &func_96m_fclk, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk hsmmc6_fclk = {
- .name = "hsmmc6_fclk",
- .parent = &func_64m_fclk,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
static const struct clksel_rate div2_1to8_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_4430 },
{ .div = 8, .val = 1, .flags = RATE_IN_4430 },
@@ -1264,6 +1195,21 @@ static struct clk l4_wkup_clk_mux_ck = {
.recalc = &omap2_clksel_recalc,
};
+static struct clk ocp_abe_iclk = {
+ .name = "ocp_abe_iclk",
+ .parent = &aess_fclk,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk per_abe_24m_fclk = {
+ .name = "per_abe_24m_fclk",
+ .parent = &dpll_abe_m2_ck,
+ .ops = &clkops_null,
+ .fixed_div = 4,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
static const struct clksel per_abe_nc_fclk_div[] = {
{ .parent = &dpll_abe_m2_ck, .rates = div2_1to2_rates },
{ .parent = NULL },
@@ -1281,41 +1227,6 @@ static struct clk per_abe_nc_fclk = {
.set_rate = &omap2_clksel_set_rate,
};
-static const struct clksel mcasp2_fclk_sel[] = {
- { .parent = &func_96m_fclk, .rates = div_1_0_rates },
- { .parent = &per_abe_nc_fclk, .rates = div_1_1_rates },
- { .parent = NULL },
-};
-
-static struct clk mcasp2_fclk = {
- .name = "mcasp2_fclk",
- .parent = &func_96m_fclk,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcasp3_fclk = {
- .name = "mcasp3_fclk",
- .parent = &func_96m_fclk,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk ocp_abe_iclk = {
- .name = "ocp_abe_iclk",
- .parent = &aess_fclk,
- .ops = &clkops_null,
- .recalc = &followparent_recalc,
-};
-
-static struct clk per_abe_24m_fclk = {
- .name = "per_abe_24m_fclk",
- .parent = &dpll_abe_m2_ck,
- .ops = &clkops_null,
- .fixed_div = 4,
- .recalc = &omap_fixed_divisor_recalc,
-};
-
static const struct clksel pmd_stm_clock_mux_sel[] = {
{ .parent = &sys_clkin_ck, .rates = div_1_0_rates },
{ .parent = &dpll_core_m6x2_ck, .rates = div_1_1_rates },
@@ -1846,8 +1757,8 @@ static struct clk l3_instr_ick = {
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l3_instr_clkdm",
.flags = ENABLE_ON_INIT,
+ .clkdm_name = "l3_instr_clkdm",
.parent = &l3_div_ck,
.recalc = &followparent_recalc,
};
@@ -1857,8 +1768,8 @@ static struct clk l3_main_3_ick = {
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l3_instr_clkdm",
.flags = ENABLE_ON_INIT,
+ .clkdm_name = "l3_instr_clkdm",
.parent = &l3_div_ck,
.recalc = &followparent_recalc,
};
@@ -1995,10 +1906,16 @@ static struct clk mcbsp3_fck = {
.clkdm_name = "abe_clkdm",
};
+static const struct clksel mcbsp4_sync_mux_sel[] = {
+ { .parent = &func_96m_fclk, .rates = div_1_0_rates },
+ { .parent = &per_abe_nc_fclk, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
static struct clk mcbsp4_sync_mux_ck = {
.name = "mcbsp4_sync_mux_ck",
.parent = &func_96m_fclk,
- .clksel = mcasp2_fclk_sel,
+ .clksel = mcbsp4_sync_mux_sel,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
@@ -2077,11 +1994,17 @@ static struct clk mcspi4_fck = {
.recalc = &followparent_recalc,
};
+static const struct clksel hsmmc1_fclk_sel[] = {
+ { .parent = &func_64m_fclk, .rates = div_1_0_rates },
+ { .parent = &func_96m_fclk, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
/* Merged hsmmc1_fclk into mmc1 */
static struct clk mmc1_fck = {
.name = "mmc1_fck",
.parent = &func_64m_fclk,
- .clksel = hsmmc6_fclk_sel,
+ .clksel = hsmmc1_fclk_sel,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
@@ -2096,7 +2019,7 @@ static struct clk mmc1_fck = {
static struct clk mmc2_fck = {
.name = "mmc2_fck",
.parent = &func_64m_fclk,
- .clksel = hsmmc6_fclk_sel,
+ .clksel = hsmmc1_fclk_sel,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
.clksel_mask = OMAP4430_CLKSEL_MASK,
@@ -2162,8 +2085,8 @@ static struct clk ocp_wp_noc_ick = {
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
- .clkdm_name = "l3_instr_clkdm",
.flags = ENABLE_ON_INIT,
+ .clkdm_name = "l3_instr_clkdm",
.parent = &l3_div_ck,
.recalc = &followparent_recalc,
};
@@ -2895,6 +2818,7 @@ static struct clk auxclk2_ck = {
.enable_reg = OMAP4_SCRM_AUXCLK2,
.enable_bit = OMAP4_ENABLE_SHIFT,
};
+
static struct clk auxclk3_ck = {
.name = "auxclk3_ck",
.parent = &sys_clkin_ck,
@@ -3077,9 +3001,6 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck, CK_443X),
CLK(NULL, "dpll_per_m6x2_ck", &dpll_per_m6x2_ck, CK_443X),
CLK(NULL, "dpll_per_m7x2_ck", &dpll_per_m7x2_ck, CK_443X),
- CLK(NULL, "dpll_unipro_ck", &dpll_unipro_ck, CK_443X),
- CLK(NULL, "dpll_unipro_x2_ck", &dpll_unipro_x2_ck, CK_443X),
- CLK(NULL, "dpll_unipro_m2x2_ck", &dpll_unipro_m2x2_ck, CK_443X),
CLK(NULL, "usb_hs_clk_div_ck", &usb_hs_clk_div_ck, CK_443X),
CLK(NULL, "dpll_usb_ck", &dpll_usb_ck, CK_443X),
CLK(NULL, "dpll_usb_clkdcoldo_ck", &dpll_usb_clkdcoldo_ck, CK_443X),
@@ -3092,17 +3013,14 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "func_48mc_fclk", &func_48mc_fclk, CK_443X),
CLK(NULL, "func_64m_fclk", &func_64m_fclk, CK_443X),
CLK(NULL, "func_96m_fclk", &func_96m_fclk, CK_443X),
- CLK(NULL, "hsmmc6_fclk", &hsmmc6_fclk, CK_443X),
CLK(NULL, "init_60m_fclk", &init_60m_fclk, CK_443X),
CLK(NULL, "l3_div_ck", &l3_div_ck, CK_443X),
CLK(NULL, "l4_div_ck", &l4_div_ck, CK_443X),
CLK(NULL, "lp_clk_div_ck", &lp_clk_div_ck, CK_443X),
CLK(NULL, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, CK_443X),
- CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_443X),
- CLK(NULL, "mcasp2_fclk", &mcasp2_fclk, CK_443X),
- CLK(NULL, "mcasp3_fclk", &mcasp3_fclk, CK_443X),
CLK(NULL, "ocp_abe_iclk", &ocp_abe_iclk, CK_443X),
CLK(NULL, "per_abe_24m_fclk", &per_abe_24m_fclk, CK_443X),
+ CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_443X),
CLK(NULL, "pmd_stm_clock_mux_ck", &pmd_stm_clock_mux_ck, CK_443X),
CLK(NULL, "pmd_trace_clk_mux_ck", &pmd_trace_clk_mux_ck, CK_443X),
CLK(NULL, "syc_clk_div_ck", &syc_clk_div_ck, CK_443X),
@@ -3204,7 +3122,6 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "uart2_fck", &uart2_fck, CK_443X),
CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
CLK(NULL, "uart4_fck", &uart4_fck, CK_443X),
- CLK(NULL, "usb_host_fs_fck", &usb_host_fs_fck, CK_443X),
CLK("usbhs-omap.0", "fs_fck", &usb_host_fs_fck, CK_443X),
CLK(NULL, "utmi_p1_gfclk", &utmi_p1_gfclk, CK_443X),
CLK(NULL, "usb_host_hs_utmi_p1_clk", &usb_host_hs_utmi_p1_clk, CK_443X),
@@ -3216,9 +3133,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "usb_host_hs_hsic60m_p2_clk", &usb_host_hs_hsic60m_p2_clk, CK_443X),
CLK(NULL, "usb_host_hs_hsic480m_p2_clk", &usb_host_hs_hsic480m_p2_clk, CK_443X),
CLK(NULL, "usb_host_hs_func48mclk", &usb_host_hs_func48mclk, CK_443X),
- CLK(NULL, "usb_host_hs_fck", &usb_host_hs_fck, CK_443X),
CLK("usbhs-omap.0", "hs_fck", &usb_host_hs_fck, CK_443X),
- CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X),
CLK(NULL, "otg_60m_gfclk", &otg_60m_gfclk, CK_443X),
CLK(NULL, "usb_otg_hs_xclk", &usb_otg_hs_xclk, CK_443X),
CLK("musb-omap2430", "ick", &usb_otg_hs_ick, CK_443X),
@@ -3226,17 +3141,26 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "usb_tll_hs_usb_ch2_clk", &usb_tll_hs_usb_ch2_clk, CK_443X),
CLK(NULL, "usb_tll_hs_usb_ch0_clk", &usb_tll_hs_usb_ch0_clk, CK_443X),
CLK(NULL, "usb_tll_hs_usb_ch1_clk", &usb_tll_hs_usb_ch1_clk, CK_443X),
- CLK(NULL, "usb_tll_hs_ick", &usb_tll_hs_ick, CK_443X),
CLK("usbhs-omap.0", "usbtll_ick", &usb_tll_hs_ick, CK_443X),
- CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X),
CLK(NULL, "usim_ck", &usim_ck, CK_443X),
CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
CLK(NULL, "usim_fck", &usim_fck, CK_443X),
CLK("omap_wdt", "fck", &wd_timer2_fck, CK_443X),
- CLK(NULL, "mailboxes_ick", &dummy_ck, CK_443X),
CLK(NULL, "wd_timer3_fck", &wd_timer3_fck, CK_443X),
CLK(NULL, "stm_clk_div_ck", &stm_clk_div_ck, CK_443X),
CLK(NULL, "trace_clk_div_ck", &trace_clk_div_ck, CK_443X),
+ CLK(NULL, "auxclk0_ck", &auxclk0_ck, CK_443X),
+ CLK(NULL, "auxclk1_ck", &auxclk1_ck, CK_443X),
+ CLK(NULL, "auxclk2_ck", &auxclk2_ck, CK_443X),
+ CLK(NULL, "auxclk3_ck", &auxclk3_ck, CK_443X),
+ CLK(NULL, "auxclk4_ck", &auxclk4_ck, CK_443X),
+ CLK(NULL, "auxclk5_ck", &auxclk5_ck, CK_443X),
+ CLK(NULL, "auxclkreq0_ck", &auxclkreq0_ck, CK_443X),
+ CLK(NULL, "auxclkreq1_ck", &auxclkreq1_ck, CK_443X),
+ CLK(NULL, "auxclkreq2_ck", &auxclkreq2_ck, CK_443X),
+ CLK(NULL, "auxclkreq3_ck", &auxclkreq3_ck, CK_443X),
+ CLK(NULL, "auxclkreq4_ck", &auxclkreq4_ck, CK_443X),
+ CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck, CK_443X),
CLK(NULL, "gpmc_ck", &dummy_ck, CK_443X),
CLK(NULL, "gpt1_ick", &dummy_ck, CK_443X),
CLK(NULL, "gpt2_ick", &dummy_ck, CK_443X),
@@ -3253,6 +3177,7 @@ static struct omap_clk omap44xx_clks[] = {
CLK("omap_i2c.2", "ick", &dummy_ck, CK_443X),
CLK("omap_i2c.3", "ick", &dummy_ck, CK_443X),
CLK("omap_i2c.4", "ick", &dummy_ck, CK_443X),
+ CLK(NULL, "mailboxes_ick", &dummy_ck, CK_443X),
CLK("omap_hsmmc.0", "ick", &dummy_ck, CK_443X),
CLK("omap_hsmmc.1", "ick", &dummy_ck, CK_443X),
CLK("omap_hsmmc.2", "ick", &dummy_ck, CK_443X),
@@ -3270,19 +3195,9 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "uart2_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart3_ick", &dummy_ck, CK_443X),
CLK(NULL, "uart4_ick", &dummy_ck, CK_443X),
+ CLK("usbhs-omap.0", "usbhost_ick", &dummy_ck, CK_443X),
+ CLK("usbhs-omap.0", "usbtll_fck", &dummy_ck, CK_443X),
CLK("omap_wdt", "ick", &dummy_ck, CK_443X),
- CLK(NULL, "auxclk0_ck", &auxclk0_ck, CK_443X),
- CLK(NULL, "auxclk1_ck", &auxclk1_ck, CK_443X),
- CLK(NULL, "auxclk2_ck", &auxclk2_ck, CK_443X),
- CLK(NULL, "auxclk3_ck", &auxclk3_ck, CK_443X),
- CLK(NULL, "auxclk4_ck", &auxclk4_ck, CK_443X),
- CLK(NULL, "auxclk5_ck", &auxclk5_ck, CK_443X),
- CLK(NULL, "auxclkreq0_ck", &auxclkreq0_ck, CK_443X),
- CLK(NULL, "auxclkreq1_ck", &auxclkreq1_ck, CK_443X),
- CLK(NULL, "auxclkreq2_ck", &auxclkreq2_ck, CK_443X),
- CLK(NULL, "auxclkreq3_ck", &auxclkreq3_ck, CK_443X),
- CLK(NULL, "auxclkreq4_ck", &auxclkreq4_ck, CK_443X),
- CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck, CK_443X),
};
int __init omap4xxx_clk_init(void)
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index a607ec196e8b..66090f2676ce 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -1,11 +1,12 @@
/*
* OMAP4 Clock domains framework
*
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Nokia Corporation
*
* Abhijit Pagare (abhijitpagare@ti.com)
* Benoit Cousson (b-cousson@ti.com)
+ * Paul Walmsley (paul@pwsan.com)
*
* This file is automatically generated from the OMAP hardware databases.
* We respectfully ask that any modifications to this file be coordinated
@@ -32,7 +33,7 @@
/* Static Dependencies for OMAP4 Clock Domains */
-static struct clkdm_dep ducati_wkup_sleep_deps[] = {
+static struct clkdm_dep d2d_wkup_sleep_deps[] = {
{
.clkdm_name = "abe_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
@@ -50,103 +51,103 @@ static struct clkdm_dep ducati_wkup_sleep_deps[] = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_dss_clkdm",
+ .clkdm_name = "l3_emif_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_emif_clkdm",
+ .clkdm_name = "l3_init_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_gfx_clkdm",
+ .clkdm_name = "l4_cfg_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_init_clkdm",
+ .clkdm_name = "l4_per_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
+ { NULL },
+};
+
+static struct clkdm_dep ducati_wkup_sleep_deps[] = {
{
- .clkdm_name = "l4_cfg_clkdm",
+ .clkdm_name = "abe_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l4_per_clkdm",
+ .clkdm_name = "ivahd_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l4_secure_clkdm",
+ .clkdm_name = "l3_1_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l4_wkup_clkdm",
+ .clkdm_name = "l3_2_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "tesla_clkdm",
+ .clkdm_name = "l3_dss_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
- { NULL },
-};
-
-static struct clkdm_dep iss_wkup_sleep_deps[] = {
{
- .clkdm_name = "ivahd_clkdm",
+ .clkdm_name = "l3_emif_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_1_clkdm",
+ .clkdm_name = "l3_gfx_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_emif_clkdm",
+ .clkdm_name = "l3_init_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
- { NULL },
-};
-
-static struct clkdm_dep ivahd_wkup_sleep_deps[] = {
{
- .clkdm_name = "l3_1_clkdm",
+ .clkdm_name = "l4_cfg_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_emif_clkdm",
+ .clkdm_name = "l4_per_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
- { NULL },
-};
-
-static struct clkdm_dep l3_d2d_wkup_sleep_deps[] = {
{
- .clkdm_name = "abe_clkdm",
+ .clkdm_name = "l4_secure_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "ivahd_clkdm",
+ .clkdm_name = "l4_wkup_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_1_clkdm",
+ .clkdm_name = "tesla_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
+ { NULL },
+};
+
+static struct clkdm_dep iss_wkup_sleep_deps[] = {
{
- .clkdm_name = "l3_2_clkdm",
+ .clkdm_name = "ivahd_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_emif_clkdm",
+ .clkdm_name = "l3_1_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l3_init_clkdm",
+ .clkdm_name = "l3_emif_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
+ { NULL },
+};
+
+static struct clkdm_dep ivahd_wkup_sleep_deps[] = {
{
- .clkdm_name = "l4_cfg_clkdm",
+ .clkdm_name = "l3_1_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{
- .clkdm_name = "l4_per_clkdm",
+ .clkdm_name = "l3_emif_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
},
{ NULL },
@@ -280,7 +281,7 @@ static struct clkdm_dep l4_secure_wkup_sleep_deps[] = {
{ NULL },
};
-static struct clkdm_dep mpuss_wkup_sleep_deps[] = {
+static struct clkdm_dep mpu_wkup_sleep_deps[] = {
{
.clkdm_name = "abe_clkdm",
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430)
@@ -497,14 +498,14 @@ static struct clockdomain l3_init_44xx_clkdm = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
-static struct clockdomain mpuss_44xx_clkdm = {
- .name = "mpuss_clkdm",
- .pwrdm = { .name = "mpu_pwrdm" },
- .prcm_partition = OMAP4430_CM1_PARTITION,
- .cm_inst = OMAP4430_CM1_MPU_INST,
- .clkdm_offs = OMAP4430_CM1_MPU_MPU_CDOFFS,
- .wkdep_srcs = mpuss_wkup_sleep_deps,
- .sleepdep_srcs = mpuss_wkup_sleep_deps,
+static struct clockdomain d2d_44xx_clkdm = {
+ .name = "d2d_clkdm",
+ .pwrdm = { .name = "core_pwrdm" },
+ .prcm_partition = OMAP4430_CM2_PARTITION,
+ .cm_inst = OMAP4430_CM2_CORE_INST,
+ .clkdm_offs = OMAP4430_CM2_CORE_D2D_CDOFFS,
+ .wkdep_srcs = d2d_wkup_sleep_deps,
+ .sleepdep_srcs = d2d_wkup_sleep_deps,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
@@ -563,6 +564,18 @@ static struct clockdomain ducati_44xx_clkdm = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
+static struct clockdomain mpu_44xx_clkdm = {
+ .name = "mpu_clkdm",
+ .pwrdm = { .name = "mpu_pwrdm" },
+ .prcm_partition = OMAP4430_CM1_PARTITION,
+ .cm_inst = OMAP4430_CM1_MPU_INST,
+ .clkdm_offs = OMAP4430_CM1_MPU_MPU_CDOFFS,
+ .wkdep_srcs = mpu_wkup_sleep_deps,
+ .sleepdep_srcs = mpu_wkup_sleep_deps,
+ .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+ .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
static struct clockdomain l3_2_44xx_clkdm = {
.name = "l3_2_clkdm",
.pwrdm = { .name = "core_pwrdm" },
@@ -585,18 +598,6 @@ static struct clockdomain l3_1_44xx_clkdm = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
-static struct clockdomain l3_d2d_44xx_clkdm = {
- .name = "l3_d2d_clkdm",
- .pwrdm = { .name = "core_pwrdm" },
- .prcm_partition = OMAP4430_CM2_PARTITION,
- .cm_inst = OMAP4430_CM2_CORE_INST,
- .clkdm_offs = OMAP4430_CM2_CORE_D2D_CDOFFS,
- .wkdep_srcs = l3_d2d_wkup_sleep_deps,
- .sleepdep_srcs = l3_d2d_wkup_sleep_deps,
- .flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
- .omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
static struct clockdomain iss_44xx_clkdm = {
.name = "iss_clkdm",
.pwrdm = { .name = "cam_pwrdm" },
@@ -655,6 +656,7 @@ static struct clockdomain l3_dma_44xx_clkdm = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
+/* As clockdomains are added or removed above, this list must also be changed */
static struct clockdomain *clockdomains_omap44xx[] __initdata = {
&l4_cefuse_44xx_clkdm,
&l4_cfg_44xx_clkdm,
@@ -666,21 +668,21 @@ static struct clockdomain *clockdomains_omap44xx[] __initdata = {
&abe_44xx_clkdm,
&l3_instr_44xx_clkdm,
&l3_init_44xx_clkdm,
- &mpuss_44xx_clkdm,
+ &d2d_44xx_clkdm,
&mpu0_44xx_clkdm,
&mpu1_44xx_clkdm,
&l3_emif_44xx_clkdm,
&l4_ao_44xx_clkdm,
&ducati_44xx_clkdm,
+ &mpu_44xx_clkdm,
&l3_2_44xx_clkdm,
&l3_1_44xx_clkdm,
- &l3_d2d_44xx_clkdm,
&iss_44xx_clkdm,
&l3_dss_44xx_clkdm,
&l4_wkup_44xx_clkdm,
&emu_sys_44xx_clkdm,
&l3_dma_44xx_clkdm,
- NULL,
+ NULL
};
void __init omap44xx_clockdomains_init(void)
diff --git a/arch/arm/mach-omap2/cm-regbits-44xx.h b/arch/arm/mach-omap2/cm-regbits-44xx.h
index 9d47a05b17b4..0e77945d26ec 100644
--- a/arch/arm/mach-omap2/cm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-44xx.h
@@ -22,22 +22,18 @@
#ifndef __ARCH_ARM_MACH_OMAP2_CM_REGBITS_44XX_H
#define __ARCH_ARM_MACH_OMAP2_CM_REGBITS_44XX_H
-/*
- * Used by CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP,
- * CM_TESLA_DYNAMICDEP
- */
+/* Used by CM_L3_1_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP */
#define OMAP4430_ABE_DYNDEP_SHIFT 3
#define OMAP4430_ABE_DYNDEP_MASK (1 << 3)
/*
- * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_ABE_STATDEP_SHIFT 3
#define OMAP4430_ABE_STATDEP_MASK (1 << 3)
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
#define OMAP4430_ALWONCORE_DYNDEP_SHIFT 16
#define OMAP4430_ALWONCORE_DYNDEP_MASK (1 << 16)
@@ -47,14 +43,13 @@
/*
* Used by CM_AUTOIDLE_DPLL_ABE, CM_AUTOIDLE_DPLL_CORE,
- * CM_AUTOIDLE_DPLL_CORE_RESTORE, CM_AUTOIDLE_DPLL_DDRPHY,
- * CM_AUTOIDLE_DPLL_IVA, CM_AUTOIDLE_DPLL_MPU, CM_AUTOIDLE_DPLL_PER,
- * CM_AUTOIDLE_DPLL_UNIPRO, CM_AUTOIDLE_DPLL_USB
+ * CM_AUTOIDLE_DPLL_DDRPHY, CM_AUTOIDLE_DPLL_IVA, CM_AUTOIDLE_DPLL_MPU,
+ * CM_AUTOIDLE_DPLL_PER, CM_AUTOIDLE_DPLL_UNIPRO, CM_AUTOIDLE_DPLL_USB
*/
#define OMAP4430_AUTO_DPLL_MODE_SHIFT 0
#define OMAP4430_AUTO_DPLL_MODE_MASK (0x7 << 0)
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
#define OMAP4430_CEFUSE_DYNDEP_SHIFT 17
#define OMAP4430_CEFUSE_DYNDEP_MASK (1 << 17)
@@ -82,15 +77,15 @@
#define OMAP4430_CLKACTIVITY_ABE_X2_CLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_ABE_X2_CLK_MASK (1 << 8)
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_SHIFT 11
#define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_MASK (1 << 11)
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_SHIFT 12
#define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_MASK (1 << 12)
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_SHIFT 13
#define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_MASK (1 << 13)
@@ -110,31 +105,31 @@
#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_SHIFT 9
#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_MASK (1 << 9)
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DLL_CLK_SHIFT 9
#define OMAP4430_CLKACTIVITY_DLL_CLK_MASK (1 << 9)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DMT10_GFCLK_SHIFT 9
#define OMAP4430_CLKACTIVITY_DMT10_GFCLK_MASK (1 << 9)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DMT11_GFCLK_SHIFT 10
#define OMAP4430_CLKACTIVITY_DMT11_GFCLK_MASK (1 << 10)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DMT2_GFCLK_SHIFT 11
#define OMAP4430_CLKACTIVITY_DMT2_GFCLK_MASK (1 << 11)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DMT3_GFCLK_SHIFT 12
#define OMAP4430_CLKACTIVITY_DMT3_GFCLK_MASK (1 << 12)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DMT4_GFCLK_SHIFT 13
#define OMAP4430_CLKACTIVITY_DMT4_GFCLK_MASK (1 << 13)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_DMT9_GFCLK_SHIFT 14
#define OMAP4430_CLKACTIVITY_DMT9_GFCLK_MASK (1 << 14)
@@ -158,7 +153,7 @@
#define OMAP4430_CLKACTIVITY_FDIF_GFCLK_SHIFT 10
#define OMAP4430_CLKACTIVITY_FDIF_GFCLK_MASK (1 << 10)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_SHIFT 15
#define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_MASK (1 << 15)
@@ -170,55 +165,55 @@
#define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_SHIFT 11
#define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_MASK (1 << 11)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_SHIFT 20
#define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_MASK (1 << 20)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_SHIFT 26
#define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_MASK (1 << 26)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_SHIFT 21
#define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_MASK (1 << 21)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_SHIFT 27
#define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_MASK (1 << 27)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_SHIFT 13
#define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_MASK (1 << 13)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_SHIFT 12
#define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_MASK (1 << 12)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_SHIFT 28
#define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_MASK (1 << 28)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_SHIFT 29
#define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_MASK (1 << 29)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_SHIFT 11
#define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_MASK (1 << 11)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_SHIFT 16
#define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_MASK (1 << 16)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_SHIFT 17
#define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_MASK (1 << 17)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_SHIFT 18
#define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_MASK (1 << 18)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_SHIFT 19
#define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_MASK (1 << 19)
@@ -234,11 +229,11 @@
#define OMAP4430_CLKACTIVITY_L3X2_D2D_GICLK_SHIFT 10
#define OMAP4430_CLKACTIVITY_L3X2_D2D_GICLK_MASK (1 << 10)
-/* Used by CM_L3_1_CLKSTCTRL, CM_L3_1_CLKSTCTRL_RESTORE */
+/* Used by CM_L3_1_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_1_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L3_1_GICLK_MASK (1 << 8)
-/* Used by CM_L3_2_CLKSTCTRL, CM_L3_2_CLKSTCTRL_RESTORE */
+/* Used by CM_L3_2_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_2_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L3_2_GICLK_MASK (1 << 8)
@@ -254,7 +249,7 @@
#define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_MASK (1 << 8)
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_MASK (1 << 8)
@@ -262,7 +257,7 @@
#define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_MASK (1 << 8)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_MASK (1 << 8)
@@ -282,7 +277,7 @@
#define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_MASK (1 << 8)
-/* Used by CM_L4CFG_CLKSTCTRL, CM_L4CFG_CLKSTCTRL_RESTORE */
+/* Used by CM_L4CFG_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_MASK (1 << 8)
@@ -290,11 +285,11 @@
#define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_SHIFT 9
#define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_MASK (1 << 9)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_SHIFT 9
#define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_MASK (1 << 9)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_L4_PER_GICLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_L4_PER_GICLK_MASK (1 << 8)
@@ -306,7 +301,7 @@
#define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_SHIFT 12
#define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_MASK (1 << 12)
-/* Used by CM_MPU_CLKSTCTRL, CM_MPU_CLKSTCTRL_RESTORE */
+/* Used by CM_MPU_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_MASK (1 << 8)
@@ -314,43 +309,43 @@
#define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_SHIFT 9
#define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_MASK (1 << 9)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_SHIFT 16
#define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_MASK (1 << 16)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_SHIFT 17
#define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_MASK (1 << 17)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_SHIFT 18
#define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_MASK (1 << 18)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_SHIFT 19
#define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_MASK (1 << 19)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_SHIFT 25
#define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_MASK (1 << 25)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_SHIFT 20
#define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_MASK (1 << 20)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_SHIFT 21
#define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_MASK (1 << 21)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_SHIFT 22
#define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_MASK (1 << 22)
-/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+/* Used by CM_L4PER_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_SHIFT 24
#define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_MASK (1 << 24)
-/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+/* Used by CM_MEMIF_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_SHIFT 10
#define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_MASK (1 << 10)
@@ -378,27 +373,27 @@
#define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_SHIFT 8
#define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_MASK (1 << 8)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_SHIFT 22
#define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_MASK (1 << 22)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_SHIFT 23
#define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_MASK (1 << 23)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_SHIFT 24
#define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_MASK (1 << 24)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_UNIPRO_DPLL_CLK_SHIFT 10
#define OMAP4430_CLKACTIVITY_UNIPRO_DPLL_CLK_MASK (1 << 10)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_USB_DPLL_CLK_SHIFT 14
#define OMAP4430_CLKACTIVITY_USB_DPLL_CLK_MASK (1 << 14)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_SHIFT 15
#define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_MASK (1 << 15)
@@ -406,11 +401,11 @@
#define OMAP4430_CLKACTIVITY_USIM_GFCLK_SHIFT 10
#define OMAP4430_CLKACTIVITY_USIM_GFCLK_MASK (1 << 10)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_SHIFT 30
#define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_MASK (1 << 30)
-/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+/* Used by CM_L3INIT_CLKSTCTRL */
#define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_SHIFT 25
#define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_MASK (1 << 25)
@@ -432,7 +427,7 @@
/*
* Renamed from CLKSEL Used by CM_ABE_DSS_SYS_CLKSEL, CM_ABE_PLL_REF_CLKSEL,
- * CM_L4_WKUP_CLKSEL, CM_CLKSEL_DUCATI_ISS_ROOT, CM_CLKSEL_USB_60MHZ
+ * CM_CLKSEL_DUCATI_ISS_ROOT, CM_CLKSEL_USB_60MHZ, CM_L4_WKUP_CLKSEL
*/
#define OMAP4430_CLKSEL_0_0_SHIFT 0
#define OMAP4430_CLKSEL_0_0_MASK (1 << 0)
@@ -453,14 +448,11 @@
#define OMAP4430_CLKSEL_AESS_FCLK_SHIFT 24
#define OMAP4430_CLKSEL_AESS_FCLK_MASK (1 << 24)
-/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
+/* Used by CM_CLKSEL_CORE */
#define OMAP4430_CLKSEL_CORE_SHIFT 0
#define OMAP4430_CLKSEL_CORE_MASK (1 << 0)
-/*
- * Renamed from CLKSEL_CORE Used by CM_SHADOW_FREQ_CONFIG2_RESTORE,
- * CM_SHADOW_FREQ_CONFIG2
- */
+/* Renamed from CLKSEL_CORE Used by CM_SHADOW_FREQ_CONFIG2 */
#define OMAP4430_CLKSEL_CORE_1_1_SHIFT 1
#define OMAP4430_CLKSEL_CORE_1_1_MASK (1 << 1)
@@ -484,18 +476,15 @@
#define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_SHIFT 26
#define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_MASK (0x3 << 26)
-/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
+/* Used by CM_CLKSEL_CORE */
#define OMAP4430_CLKSEL_L3_SHIFT 4
#define OMAP4430_CLKSEL_L3_MASK (1 << 4)
-/*
- * Renamed from CLKSEL_L3 Used by CM_SHADOW_FREQ_CONFIG2_RESTORE,
- * CM_SHADOW_FREQ_CONFIG2
- */
+/* Renamed from CLKSEL_L3 Used by CM_SHADOW_FREQ_CONFIG2 */
#define OMAP4430_CLKSEL_L3_SHADOW_SHIFT 2
#define OMAP4430_CLKSEL_L3_SHADOW_MASK (1 << 2)
-/* Used by CM_CLKSEL_CORE, CM_CLKSEL_CORE_RESTORE */
+/* Used by CM_CLKSEL_CORE */
#define OMAP4430_CLKSEL_L4_SHIFT 8
#define OMAP4430_CLKSEL_L4_MASK (1 << 8)
@@ -526,11 +515,11 @@
#define OMAP4430_CLKSEL_SOURCE_24_24_SHIFT 24
#define OMAP4430_CLKSEL_SOURCE_24_24_MASK (1 << 24)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_CLKSEL_UTMI_P1_SHIFT 24
#define OMAP4430_CLKSEL_UTMI_P1_MASK (1 << 24)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_CLKSEL_UTMI_P2_SHIFT 25
#define OMAP4430_CLKSEL_UTMI_P2_MASK (1 << 25)
@@ -538,13 +527,10 @@
* Used by CM1_ABE_CLKSTCTRL, CM_ALWON_CLKSTCTRL, CM_CAM_CLKSTCTRL,
* CM_CEFUSE_CLKSTCTRL, CM_D2D_CLKSTCTRL, CM_DSS_CLKSTCTRL,
* CM_DUCATI_CLKSTCTRL, CM_EMU_CLKSTCTRL, CM_GFX_CLKSTCTRL, CM_IVAHD_CLKSTCTRL,
- * CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE, CM_L3INSTR_CLKSTCTRL,
- * CM_L3_1_CLKSTCTRL, CM_L3_1_CLKSTCTRL_RESTORE, CM_L3_2_CLKSTCTRL,
- * CM_L3_2_CLKSTCTRL_RESTORE, CM_L4CFG_CLKSTCTRL, CM_L4CFG_CLKSTCTRL_RESTORE,
- * CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE, CM_L4SEC_CLKSTCTRL,
- * CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE, CM_MPU_CLKSTCTRL,
- * CM_MPU_CLKSTCTRL_RESTORE, CM_SDMA_CLKSTCTRL, CM_TESLA_CLKSTCTRL,
- * CM_WKUP_CLKSTCTRL
+ * CM_L3INIT_CLKSTCTRL, CM_L3INSTR_CLKSTCTRL, CM_L3_1_CLKSTCTRL,
+ * CM_L3_2_CLKSTCTRL, CM_L4CFG_CLKSTCTRL, CM_L4PER_CLKSTCTRL,
+ * CM_L4SEC_CLKSTCTRL, CM_MEMIF_CLKSTCTRL, CM_MPU_CLKSTCTRL, CM_SDMA_CLKSTCTRL,
+ * CM_TESLA_CLKSTCTRL, CM_WKUP_CLKSTCTRL
*/
#define OMAP4430_CLKTRCTRL_SHIFT 0
#define OMAP4430_CLKTRCTRL_MASK (0x3 << 0)
@@ -561,10 +547,7 @@
#define OMAP4430_CUSTOM_SHIFT 6
#define OMAP4430_CUSTOM_MASK (0x3 << 6)
-/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE
- */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP */
#define OMAP4430_D2D_DYNDEP_SHIFT 18
#define OMAP4430_D2D_DYNDEP_MASK (1 << 18)
@@ -574,31 +557,29 @@
/*
* Used by CM_SSC_DELTAMSTEP_DPLL_ABE, CM_SSC_DELTAMSTEP_DPLL_CORE,
- * CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE, CM_SSC_DELTAMSTEP_DPLL_DDRPHY,
- * CM_SSC_DELTAMSTEP_DPLL_IVA, CM_SSC_DELTAMSTEP_DPLL_MPU,
- * CM_SSC_DELTAMSTEP_DPLL_PER, CM_SSC_DELTAMSTEP_DPLL_UNIPRO,
- * CM_SSC_DELTAMSTEP_DPLL_USB
+ * CM_SSC_DELTAMSTEP_DPLL_DDRPHY, CM_SSC_DELTAMSTEP_DPLL_IVA,
+ * CM_SSC_DELTAMSTEP_DPLL_MPU, CM_SSC_DELTAMSTEP_DPLL_PER,
+ * CM_SSC_DELTAMSTEP_DPLL_UNIPRO, CM_SSC_DELTAMSTEP_DPLL_USB
*/
#define OMAP4430_DELTAMSTEP_SHIFT 0
#define OMAP4430_DELTAMSTEP_MASK (0xfffff << 0)
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
-#define OMAP4430_DLL_OVERRIDE_SHIFT 2
-#define OMAP4430_DLL_OVERRIDE_MASK (1 << 2)
+/* Used by CM_DLL_CTRL */
+#define OMAP4430_DLL_OVERRIDE_SHIFT 0
+#define OMAP4430_DLL_OVERRIDE_MASK (1 << 0)
-/* Renamed from DLL_OVERRIDE Used by CM_DLL_CTRL */
-#define OMAP4430_DLL_OVERRIDE_0_0_SHIFT 0
-#define OMAP4430_DLL_OVERRIDE_0_0_MASK (1 << 0)
+/* Renamed from DLL_OVERRIDE Used by CM_SHADOW_FREQ_CONFIG1 */
+#define OMAP4430_DLL_OVERRIDE_2_2_SHIFT 2
+#define OMAP4430_DLL_OVERRIDE_2_2_MASK (1 << 2)
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG1 */
#define OMAP4430_DLL_RESET_SHIFT 3
#define OMAP4430_DLL_RESET_MASK (1 << 3)
/*
- * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
- * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO,
- * CM_CLKSEL_DPLL_USB
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDRPHY,
+ * CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER,
+ * CM_CLKSEL_DPLL_UNIPRO, CM_CLKSEL_DPLL_USB
*/
#define OMAP4430_DPLL_BYP_CLKSEL_SHIFT 23
#define OMAP4430_DPLL_BYP_CLKSEL_MASK (1 << 23)
@@ -607,28 +588,19 @@
#define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_SHIFT 8
#define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_MASK (1 << 8)
-/* Used by CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_CORE_RESTORE */
+/* Used by CM_CLKSEL_DPLL_CORE */
#define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_SHIFT 20
#define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_MASK (1 << 20)
-/*
- * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
- * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
- */
+/* Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE, CM_DIV_M3_DPLL_PER */
#define OMAP4430_DPLL_CLKOUTHIF_DIV_SHIFT 0
#define OMAP4430_DPLL_CLKOUTHIF_DIV_MASK (0x1f << 0)
-/*
- * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
- * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
- */
+/* Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE, CM_DIV_M3_DPLL_PER */
#define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_SHIFT 5
#define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_MASK (1 << 5)
-/*
- * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
- * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
- */
+/* Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE, CM_DIV_M3_DPLL_PER */
#define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT 8
#define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_MASK (1 << 8)
@@ -637,9 +609,8 @@
#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK (1 << 10)
/*
- * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
- * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_CLKOUT_DIV_SHIFT 0
#define OMAP4430_DPLL_CLKOUT_DIV_MASK (0x1f << 0)
@@ -649,9 +620,8 @@
#define OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK (0x7f << 0)
/*
- * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
- * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_CLKOUT_DIVCHACK_SHIFT 5
#define OMAP4430_DPLL_CLKOUT_DIVCHACK_MASK (1 << 5)
@@ -661,29 +631,28 @@
#define OMAP4430_DPLL_CLKOUT_DIVCHACK_M2_USB_MASK (1 << 7)
/*
- * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
- * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
*/
#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_SHIFT 8
#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK (1 << 8)
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG1 */
#define OMAP4430_DPLL_CORE_DPLL_EN_SHIFT 8
#define OMAP4430_DPLL_CORE_DPLL_EN_MASK (0x7 << 8)
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG1 */
#define OMAP4430_DPLL_CORE_M2_DIV_SHIFT 11
#define OMAP4430_DPLL_CORE_M2_DIV_MASK (0x1f << 11)
-/* Used by CM_SHADOW_FREQ_CONFIG2, CM_SHADOW_FREQ_CONFIG2_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG2 */
#define OMAP4430_DPLL_CORE_M5_DIV_SHIFT 3
#define OMAP4430_DPLL_CORE_M5_DIV_MASK (0x1f << 3)
/*
- * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
- * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDRPHY,
+ * CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER,
+ * CM_CLKSEL_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_DIV_SHIFT 0
#define OMAP4430_DPLL_DIV_MASK (0x7f << 0)
@@ -693,9 +662,8 @@
#define OMAP4430_DPLL_DIV_0_7_MASK (0xff << 0)
/*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
*/
#define OMAP4430_DPLL_DRIFTGUARD_EN_SHIFT 8
#define OMAP4430_DPLL_DRIFTGUARD_EN_MASK (1 << 8)
@@ -705,26 +673,25 @@
#define OMAP4430_DPLL_DRIFTGUARD_EN_3_3_MASK (1 << 3)
/*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_USB
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB
*/
#define OMAP4430_DPLL_EN_SHIFT 0
#define OMAP4430_DPLL_EN_MASK (0x7 << 0)
/*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_LPMODE_EN_SHIFT 10
#define OMAP4430_DPLL_LPMODE_EN_MASK (1 << 10)
/*
- * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
- * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA,
- * CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO
+ * Used by CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDRPHY,
+ * CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU, CM_CLKSEL_DPLL_PER,
+ * CM_CLKSEL_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_MULT_SHIFT 8
#define OMAP4430_DPLL_MULT_MASK (0x7ff << 8)
@@ -734,9 +701,9 @@
#define OMAP4430_DPLL_MULT_USB_MASK (0xfff << 8)
/*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO
*/
#define OMAP4430_DPLL_REGM4XEN_SHIFT 11
#define OMAP4430_DPLL_REGM4XEN_MASK (1 << 11)
@@ -746,55 +713,46 @@
#define OMAP4430_DPLL_SD_DIV_MASK (0xff << 24)
/*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_USB
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB
*/
#define OMAP4430_DPLL_SSC_ACK_SHIFT 13
#define OMAP4430_DPLL_SSC_ACK_MASK (1 << 13)
/*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_USB
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB
*/
#define OMAP4430_DPLL_SSC_DOWNSPREAD_SHIFT 14
#define OMAP4430_DPLL_SSC_DOWNSPREAD_MASK (1 << 14)
/*
- * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
- * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA,
- * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
- * CM_CLKMODE_DPLL_USB
+ * Used by CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDRPHY,
+ * CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER,
+ * CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB
*/
#define OMAP4430_DPLL_SSC_EN_SHIFT 12
#define OMAP4430_DPLL_SSC_EN_MASK (1 << 12)
-/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP, CM_L4PER_DYNAMICDEP_RESTORE
- */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
#define OMAP4430_DSS_DYNDEP_SHIFT 8
#define OMAP4430_DSS_DYNDEP_MASK (1 << 8)
-/*
- * Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE
- */
+/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP */
#define OMAP4430_DSS_STATDEP_SHIFT 8
#define OMAP4430_DSS_STATDEP_MASK (1 << 8)
-/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
+/* Used by CM_L3_2_DYNAMICDEP */
#define OMAP4430_DUCATI_DYNDEP_SHIFT 0
#define OMAP4430_DUCATI_DYNDEP_MASK (1 << 0)
-/* Used by CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE */
+/* Used by CM_MPU_STATICDEP, CM_SDMA_STATICDEP */
#define OMAP4430_DUCATI_STATDEP_SHIFT 0
#define OMAP4430_DUCATI_STATDEP_MASK (1 << 0)
-/* Used by CM_SHADOW_FREQ_CONFIG1, CM_SHADOW_FREQ_CONFIG1_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG1 */
#define OMAP4430_FREQ_UPDATE_SHIFT 0
#define OMAP4430_FREQ_UPDATE_MASK (1 << 0)
@@ -802,7 +760,7 @@
#define OMAP4430_FUNC_SHIFT 16
#define OMAP4430_FUNC_MASK (0xfff << 16)
-/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
+/* Used by CM_L3_2_DYNAMICDEP */
#define OMAP4430_GFX_DYNDEP_SHIFT 10
#define OMAP4430_GFX_DYNDEP_MASK (1 << 10)
@@ -810,119 +768,95 @@
#define OMAP4430_GFX_STATDEP_SHIFT 10
#define OMAP4430_GFX_STATDEP_MASK (1 << 10)
-/* Used by CM_SHADOW_FREQ_CONFIG2, CM_SHADOW_FREQ_CONFIG2_RESTORE */
+/* Used by CM_SHADOW_FREQ_CONFIG2 */
#define OMAP4430_GPMC_FREQ_UPDATE_SHIFT 0
#define OMAP4430_GPMC_FREQ_UPDATE_MASK (1 << 0)
/*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT1_DIV_SHIFT 0
#define OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK (0x1f << 0)
/*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_SHIFT 5
#define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_MASK (1 << 5)
/*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_SHIFT 8
#define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_MASK (1 << 8)
/*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_SHIFT 12
#define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_MASK (1 << 12)
/*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT2_DIV_SHIFT 0
#define OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK (0x1f << 0)
/*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_SHIFT 5
#define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_MASK (1 << 5)
/*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_SHIFT 8
#define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_MASK (1 << 8)
/*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_SHIFT 12
#define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_MASK (1 << 12)
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
#define OMAP4430_HSDIVIDER_CLKOUT3_DIV_SHIFT 0
#define OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK (0x1f << 0)
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
#define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_SHIFT 5
#define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_MASK (1 << 5)
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
#define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_SHIFT 8
#define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_MASK (1 << 8)
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
#define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_SHIFT 12
#define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_MASK (1 << 12)
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
#define OMAP4430_HSDIVIDER_CLKOUT4_DIV_SHIFT 0
#define OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK (0x1f << 0)
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
#define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_SHIFT 5
#define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_MASK (1 << 5)
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
#define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_SHIFT 8
#define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_MASK (1 << 8)
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
#define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_SHIFT 12
#define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_MASK (1 << 12)
@@ -934,8 +868,7 @@
* CM1_ABE_TIMER8_CLKCTRL, CM1_ABE_WDT3_CLKCTRL, CM_ALWON_MDMINTC_CLKCTRL,
* CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL, CM_ALWON_SR_MPU_CLKCTRL,
* CM_CAM_FDIF_CLKCTRL, CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
- * CM_CM1_PROFILING_CLKCTRL, CM_CM1_PROFILING_CLKCTRL_RESTORE,
- * CM_CM2_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL_RESTORE,
+ * CM_CM1_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL,
* CM_D2D_MODEM_ICR_CLKCTRL, CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL,
* CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
* CM_EMU_DEBUGSS_CLKCTRL, CM_GFX_GFX_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL,
@@ -944,30 +877,24 @@
* CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
* CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
* CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_HOST_FS_CLKCTRL,
- * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL,
- * CM_L3INIT_USB_TLL_CLKCTRL_RESTORE, CM_L3INIT_XHPI_CLKCTRL,
- * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL_RESTORE,
- * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
- * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE,
+ * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
+ * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL,
+ * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL,
* CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL, CM_L3_2_L3_2_CLKCTRL,
* CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL, CM_L4CFG_L4_CFG_CLKCTRL,
* CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL, CM_L4PER_ADC_CLKCTRL,
* CM_L4PER_DMTIMER10_CLKCTRL, CM_L4PER_DMTIMER11_CLKCTRL,
* CM_L4PER_DMTIMER2_CLKCTRL, CM_L4PER_DMTIMER3_CLKCTRL,
* CM_L4PER_DMTIMER4_CLKCTRL, CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL,
- * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
- * CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL,
- * CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL,
- * CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL,
- * CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL,
- * CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL,
- * CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL,
- * CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
+ * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL,
+ * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_HDQ1W_CLKCTRL,
+ * CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL, CM_L4PER_I2C1_CLKCTRL,
+ * CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL, CM_L4PER_I2C4_CLKCTRL,
+ * CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL,
+ * CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL, CM_L4PER_MCSPI1_CLKCTRL,
+ * CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL, CM_L4PER_MCSPI4_CLKCTRL,
+ * CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL, CM_L4PER_MMCSD4_CLKCTRL,
+ * CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
* CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL, CM_L4PER_UART2_CLKCTRL,
* CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL, CM_L4SEC_AES1_CLKCTRL,
* CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_L4SEC_DES3DES_CLKCTRL,
@@ -983,166 +910,148 @@
#define OMAP4430_IDLEST_SHIFT 16
#define OMAP4430_IDLEST_MASK (0x3 << 16)
-/*
- * Used by CM_DUCATI_DYNAMICDEP, CM_L3_2_DYNAMICDEP,
- * CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE
- */
+/* Used by CM_DUCATI_DYNAMICDEP, CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP */
#define OMAP4430_ISS_DYNDEP_SHIFT 9
#define OMAP4430_ISS_DYNDEP_MASK (1 << 9)
/*
* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_TESLA_STATICDEP
*/
#define OMAP4430_ISS_STATDEP_SHIFT 9
#define OMAP4430_ISS_STATDEP_MASK (1 << 9)
-/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_TESLA_DYNAMICDEP */
+/* Used by CM_L3_2_DYNAMICDEP, CM_TESLA_DYNAMICDEP */
#define OMAP4430_IVAHD_DYNDEP_SHIFT 2
#define OMAP4430_IVAHD_DYNDEP_MASK (1 << 2)
/*
- * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
- * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_DSS_STATICDEP,
+ * CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_IVAHD_STATDEP_SHIFT 2
#define OMAP4430_IVAHD_STATDEP_MASK (1 << 2)
-/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP, CM_L4PER_DYNAMICDEP_RESTORE
- */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
#define OMAP4430_L3INIT_DYNDEP_SHIFT 7
#define OMAP4430_L3INIT_DYNDEP_MASK (1 << 7)
/*
- * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
- * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
- * CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_MPU_STATICDEP,
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_L3INIT_STATDEP_SHIFT 7
#define OMAP4430_L3INIT_STATDEP_MASK (1 << 7)
/*
* Used by CM_DSS_DYNAMICDEP, CM_L3INIT_DYNAMICDEP, CM_L3_2_DYNAMICDEP,
- * CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
+ * CM_L4CFG_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
*/
#define OMAP4430_L3_1_DYNDEP_SHIFT 5
#define OMAP4430_L3_1_DYNDEP_MASK (1 << 5)
/*
- * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
- * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_DSS_STATICDEP,
+ * CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
* CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_L3_1_STATDEP_SHIFT 5
#define OMAP4430_L3_1_STATDEP_MASK (1 << 5)
/*
- * Used by CM_CAM_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE,
- * CM_DUCATI_DYNAMICDEP, CM_EMU_DYNAMICDEP, CM_GFX_DYNAMICDEP,
- * CM_IVAHD_DYNAMICDEP, CM_L3INIT_DYNAMICDEP, CM_L3_1_DYNAMICDEP,
- * CM_L3_1_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4SEC_DYNAMICDEP, CM_SDMA_DYNAMICDEP
+ * Used by CM_CAM_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_DUCATI_DYNAMICDEP,
+ * CM_EMU_DYNAMICDEP, CM_GFX_DYNAMICDEP, CM_IVAHD_DYNAMICDEP,
+ * CM_L3INIT_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
+ * CM_L4SEC_DYNAMICDEP, CM_SDMA_DYNAMICDEP
*/
#define OMAP4430_L3_2_DYNDEP_SHIFT 6
#define OMAP4430_L3_2_DYNDEP_MASK (1 << 6)
/*
- * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
- * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_DSS_STATICDEP,
+ * CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
* CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_L3_2_STATDEP_SHIFT 6
#define OMAP4430_L3_2_STATDEP_MASK (1 << 6)
-/* Used by CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE */
+/* Used by CM_L3_1_DYNAMICDEP */
#define OMAP4430_L4CFG_DYNDEP_SHIFT 12
#define OMAP4430_L4CFG_DYNDEP_MASK (1 << 12)
/*
- * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP,
- * CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_L4CFG_STATDEP_SHIFT 12
#define OMAP4430_L4CFG_STATDEP_MASK (1 << 12)
-/* Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE */
+/* Used by CM_L3_2_DYNAMICDEP */
#define OMAP4430_L4PER_DYNDEP_SHIFT 13
#define OMAP4430_L4PER_DYNDEP_MASK (1 << 13)
/*
- * Used by CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE, CM_DUCATI_STATICDEP,
- * CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_L4SEC_STATICDEP, CM_MPU_STATICDEP, CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_L4PER_STATDEP_SHIFT 13
#define OMAP4430_L4PER_STATDEP_MASK (1 << 13)
-/*
- * Used by CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP,
- * CM_L4PER_DYNAMICDEP_RESTORE
- */
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
#define OMAP4430_L4SEC_DYNDEP_SHIFT 14
#define OMAP4430_L4SEC_DYNDEP_MASK (1 << 14)
/*
* Used by CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE
+ * CM_SDMA_STATICDEP
*/
#define OMAP4430_L4SEC_STATDEP_SHIFT 14
#define OMAP4430_L4SEC_STATDEP_MASK (1 << 14)
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
#define OMAP4430_L4WKUP_DYNDEP_SHIFT 15
#define OMAP4430_L4WKUP_DYNDEP_MASK (1 << 15)
/*
* Used by CM_DUCATI_STATICDEP, CM_L3INIT_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_L4WKUP_STATDEP_SHIFT 15
#define OMAP4430_L4WKUP_STATDEP_MASK (1 << 15)
/*
- * Used by CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE, CM_L3_1_DYNAMICDEP,
- * CM_L3_1_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP
+ * Used by CM_D2D_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
+ * CM_MPU_DYNAMICDEP
*/
#define OMAP4430_MEMIF_DYNDEP_SHIFT 4
#define OMAP4430_MEMIF_DYNDEP_MASK (1 << 4)
/*
- * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_D2D_STATICDEP_RESTORE,
- * CM_DSS_STATICDEP, CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
+ * Used by CM_CAM_STATICDEP, CM_D2D_STATICDEP, CM_DSS_STATICDEP,
+ * CM_DUCATI_STATICDEP, CM_GFX_STATICDEP, CM_IVAHD_STATICDEP,
* CM_L3INIT_STATICDEP, CM_L4SEC_STATICDEP, CM_MPU_STATICDEP,
- * CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_TESLA_STATICDEP
+ * CM_SDMA_STATICDEP, CM_TESLA_STATICDEP
*/
#define OMAP4430_MEMIF_STATDEP_SHIFT 4
#define OMAP4430_MEMIF_STATDEP_MASK (1 << 4)
/*
* Used by CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
- * CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE, CM_SSC_MODFREQDIV_DPLL_DDRPHY,
- * CM_SSC_MODFREQDIV_DPLL_IVA, CM_SSC_MODFREQDIV_DPLL_MPU,
- * CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
- * CM_SSC_MODFREQDIV_DPLL_USB
+ * CM_SSC_MODFREQDIV_DPLL_DDRPHY, CM_SSC_MODFREQDIV_DPLL_IVA,
+ * CM_SSC_MODFREQDIV_DPLL_MPU, CM_SSC_MODFREQDIV_DPLL_PER,
+ * CM_SSC_MODFREQDIV_DPLL_UNIPRO, CM_SSC_MODFREQDIV_DPLL_USB
*/
#define OMAP4430_MODFREQDIV_EXPONENT_SHIFT 8
#define OMAP4430_MODFREQDIV_EXPONENT_MASK (0x7 << 8)
/*
* Used by CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
- * CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE, CM_SSC_MODFREQDIV_DPLL_DDRPHY,
- * CM_SSC_MODFREQDIV_DPLL_IVA, CM_SSC_MODFREQDIV_DPLL_MPU,
- * CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
- * CM_SSC_MODFREQDIV_DPLL_USB
+ * CM_SSC_MODFREQDIV_DPLL_DDRPHY, CM_SSC_MODFREQDIV_DPLL_IVA,
+ * CM_SSC_MODFREQDIV_DPLL_MPU, CM_SSC_MODFREQDIV_DPLL_PER,
+ * CM_SSC_MODFREQDIV_DPLL_UNIPRO, CM_SSC_MODFREQDIV_DPLL_USB
*/
#define OMAP4430_MODFREQDIV_MANTISSA_SHIFT 0
#define OMAP4430_MODFREQDIV_MANTISSA_MASK (0x7f << 0)
@@ -1155,8 +1064,7 @@
* CM1_ABE_TIMER8_CLKCTRL, CM1_ABE_WDT3_CLKCTRL, CM_ALWON_MDMINTC_CLKCTRL,
* CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL, CM_ALWON_SR_MPU_CLKCTRL,
* CM_CAM_FDIF_CLKCTRL, CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
- * CM_CM1_PROFILING_CLKCTRL, CM_CM1_PROFILING_CLKCTRL_RESTORE,
- * CM_CM2_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL_RESTORE,
+ * CM_CM1_PROFILING_CLKCTRL, CM_CM2_PROFILING_CLKCTRL,
* CM_D2D_MODEM_ICR_CLKCTRL, CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL,
* CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
* CM_EMU_DEBUGSS_CLKCTRL, CM_GFX_GFX_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL,
@@ -1165,30 +1073,24 @@
* CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
* CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
* CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_HOST_FS_CLKCTRL,
- * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL,
- * CM_L3INIT_USB_TLL_CLKCTRL_RESTORE, CM_L3INIT_XHPI_CLKCTRL,
- * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL_RESTORE,
- * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
- * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE,
+ * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
+ * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_L3INSTR_L3_3_CLKCTRL,
+ * CM_L3INSTR_L3_INSTR_CLKCTRL, CM_L3INSTR_OCP_WP1_CLKCTRL,
* CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL, CM_L3_2_L3_2_CLKCTRL,
* CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL, CM_L4CFG_L4_CFG_CLKCTRL,
* CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL, CM_L4PER_ADC_CLKCTRL,
* CM_L4PER_DMTIMER10_CLKCTRL, CM_L4PER_DMTIMER11_CLKCTRL,
* CM_L4PER_DMTIMER2_CLKCTRL, CM_L4PER_DMTIMER3_CLKCTRL,
* CM_L4PER_DMTIMER4_CLKCTRL, CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL,
- * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
- * CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL,
- * CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL,
- * CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL,
- * CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL,
- * CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL,
- * CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL,
- * CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
+ * CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL,
+ * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_HDQ1W_CLKCTRL,
+ * CM_L4PER_HECC1_CLKCTRL, CM_L4PER_HECC2_CLKCTRL, CM_L4PER_I2C1_CLKCTRL,
+ * CM_L4PER_I2C2_CLKCTRL, CM_L4PER_I2C3_CLKCTRL, CM_L4PER_I2C4_CLKCTRL,
+ * CM_L4PER_I2C5_CLKCTRL, CM_L4PER_L4PER_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL,
+ * CM_L4PER_MCASP3_CLKCTRL, CM_L4PER_MCBSP4_CLKCTRL, CM_L4PER_MCSPI1_CLKCTRL,
+ * CM_L4PER_MCSPI2_CLKCTRL, CM_L4PER_MCSPI3_CLKCTRL, CM_L4PER_MCSPI4_CLKCTRL,
+ * CM_L4PER_MGATE_CLKCTRL, CM_L4PER_MMCSD3_CLKCTRL, CM_L4PER_MMCSD4_CLKCTRL,
+ * CM_L4PER_MMCSD5_CLKCTRL, CM_L4PER_MSPROHG_CLKCTRL,
* CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL, CM_L4PER_UART2_CLKCTRL,
* CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL, CM_L4SEC_AES1_CLKCTRL,
* CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_L4SEC_DES3DES_CLKCTRL,
@@ -1221,11 +1123,9 @@
#define OMAP4430_OPTFCLKEN_CTRLCLK_MASK (1 << 8)
/*
- * Used by CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL_RESTORE,
- * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL_RESTORE, CM_WKUP_GPIO1_CLKCTRL
+ * Used by CM_L4PER_GPIO2_CLKCTRL, CM_L4PER_GPIO3_CLKCTRL,
+ * CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL, CM_L4PER_GPIO6_CLKCTRL,
+ * CM_WKUP_GPIO1_CLKCTRL
*/
#define OMAP4430_OPTFCLKEN_DBCLK_SHIFT 8
#define OMAP4430_OPTFCLKEN_DBCLK_MASK (1 << 8)
@@ -1254,23 +1154,23 @@
#define OMAP4430_OPTFCLKEN_FCLK2_SHIFT 10
#define OMAP4430_OPTFCLKEN_FCLK2_MASK (1 << 10)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT 15
#define OMAP4430_OPTFCLKEN_FUNC48MCLK_MASK (1 << 15)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT 13
#define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_MASK (1 << 13)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT 14
#define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_MASK (1 << 14)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT 11
#define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_MASK (1 << 11)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT 12
#define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_MASK (1 << 12)
@@ -1306,27 +1206,27 @@
#define OMAP4430_OPTFCLKEN_TXPHYCLK_SHIFT 8
#define OMAP4430_OPTFCLKEN_TXPHYCLK_MASK (1 << 8)
-/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL */
#define OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT 8
#define OMAP4430_OPTFCLKEN_USB_CH0_CLK_MASK (1 << 8)
-/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL */
#define OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT 9
#define OMAP4430_OPTFCLKEN_USB_CH1_CLK_MASK (1 << 9)
-/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL */
#define OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT 10
#define OMAP4430_OPTFCLKEN_USB_CH2_CLK_MASK (1 << 10)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT 8
#define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_MASK (1 << 8)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT 9
#define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_MASK (1 << 9)
-/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL */
#define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT 10
#define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_MASK (1 << 10)
@@ -1374,7 +1274,7 @@
#define OMAP4430_PMD_TRACE_MUX_CTRL_SHIFT 22
#define OMAP4430_PMD_TRACE_MUX_CTRL_MASK (0x3 << 22)
-/* Used by CM_DYN_DEP_PRESCAL, CM_DYN_DEP_PRESCAL_RESTORE */
+/* Used by CM_DYN_DEP_PRESCAL */
#define OMAP4430_PRESCAL_SHIFT 0
#define OMAP4430_PRESCAL_MASK (0x3f << 0)
@@ -1382,10 +1282,7 @@
#define OMAP4430_R_RTL_SHIFT 11
#define OMAP4430_R_RTL_MASK (0x1f << 11)
-/*
- * Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE,
- * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE
- */
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL */
#define OMAP4430_SAR_MODE_SHIFT 4
#define OMAP4430_SAR_MODE_MASK (1 << 4)
@@ -1397,7 +1294,7 @@
#define OMAP4430_SCHEME_SHIFT 30
#define OMAP4430_SCHEME_MASK (0x3 << 30)
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
#define OMAP4430_SDMA_DYNDEP_SHIFT 11
#define OMAP4430_SDMA_DYNDEP_MASK (1 << 11)
@@ -1417,10 +1314,10 @@
* CM_L3INIT_HSI_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL,
* CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
* CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
- * CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE,
- * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
- * CM_L3INIT_XHPI_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL, CM_MPU_MPU_CLKCTRL,
- * CM_SDMA_SDMA_CLKCTRL, CM_TESLA_TESLA_CLKCTRL
+ * CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_FS_CLKCTRL,
+ * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL,
+ * CM_L4SEC_CRYPTODMA_CLKCTRL, CM_MPU_MPU_CLKCTRL, CM_SDMA_SDMA_CLKCTRL,
+ * CM_TESLA_TESLA_CLKCTRL
*/
#define OMAP4430_STBYST_SHIFT 18
#define OMAP4430_STBYST_MASK (1 << 18)
@@ -1438,17 +1335,13 @@
#define OMAP4430_ST_DPLL_CLKDCOLDO_MASK (1 << 9)
/*
- * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
- * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU,
- * CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
+ * Used by CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU, CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB
*/
#define OMAP4430_ST_DPLL_CLKOUT_SHIFT 9
#define OMAP4430_ST_DPLL_CLKOUT_MASK (1 << 9)
-/*
- * Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE,
- * CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_PER
- */
+/* Used by CM_DIV_M3_DPLL_ABE, CM_DIV_M3_DPLL_CORE, CM_DIV_M3_DPLL_PER */
#define OMAP4430_ST_DPLL_CLKOUTHIF_SHIFT 9
#define OMAP4430_ST_DPLL_CLKOUTHIF_MASK (1 << 9)
@@ -1457,30 +1350,24 @@
#define OMAP4430_ST_DPLL_CLKOUTX2_MASK (1 << 11)
/*
- * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_CORE_RESTORE,
- * CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA, CM_DIV_M4_DPLL_PER
+ * Used by CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA,
+ * CM_DIV_M4_DPLL_PER
*/
#define OMAP4430_ST_HSDIVIDER_CLKOUT1_SHIFT 9
#define OMAP4430_ST_HSDIVIDER_CLKOUT1_MASK (1 << 9)
/*
- * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_CORE_RESTORE,
- * CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA, CM_DIV_M5_DPLL_PER
+ * Used by CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA,
+ * CM_DIV_M5_DPLL_PER
*/
#define OMAP4430_ST_HSDIVIDER_CLKOUT2_SHIFT 9
#define OMAP4430_ST_HSDIVIDER_CLKOUT2_MASK (1 << 9)
-/*
- * Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_CORE_RESTORE,
- * CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER
- */
+/* Used by CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY, CM_DIV_M6_DPLL_PER */
#define OMAP4430_ST_HSDIVIDER_CLKOUT3_SHIFT 9
#define OMAP4430_ST_HSDIVIDER_CLKOUT3_MASK (1 << 9)
-/*
- * Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_CORE_RESTORE,
- * CM_DIV_M7_DPLL_PER
- */
+/* Used by CM_DIV_M7_DPLL_CORE, CM_DIV_M7_DPLL_PER */
#define OMAP4430_ST_HSDIVIDER_CLKOUT4_SHIFT 9
#define OMAP4430_ST_HSDIVIDER_CLKOUT4_MASK (1 << 9)
@@ -1496,7 +1383,7 @@
#define OMAP4430_SYS_CLKSEL_SHIFT 0
#define OMAP4430_SYS_CLKSEL_MASK (0x7 << 0)
-/* Used by CM_L4CFG_DYNAMICDEP, CM_L4CFG_DYNAMICDEP_RESTORE */
+/* Used by CM_L4CFG_DYNAMICDEP */
#define OMAP4430_TESLA_DYNDEP_SHIFT 1
#define OMAP4430_TESLA_DYNDEP_MASK (1 << 1)
@@ -1505,11 +1392,9 @@
#define OMAP4430_TESLA_STATDEP_MASK (1 << 1)
/*
- * Used by CM_D2D_DYNAMICDEP, CM_D2D_DYNAMICDEP_RESTORE, CM_DUCATI_DYNAMICDEP,
- * CM_EMU_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE,
- * CM_L3_2_DYNAMICDEP, CM_L3_2_DYNAMICDEP_RESTORE, CM_L4CFG_DYNAMICDEP,
- * CM_L4CFG_DYNAMICDEP_RESTORE, CM_L4PER_DYNAMICDEP,
- * CM_L4PER_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
+ * Used by CM_D2D_DYNAMICDEP, CM_DUCATI_DYNAMICDEP, CM_EMU_DYNAMICDEP,
+ * CM_L3_1_DYNAMICDEP, CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
+ * CM_L4PER_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
*/
#define OMAP4430_WINDOWSIZE_SHIFT 24
#define OMAP4430_WINDOWSIZE_MASK (0xf << 24)
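
The _SHIFT/_MASK pairs kept above are consumed with the usual read-modify-write idiom. A minimal illustrative helper (not part of this patch; the actual register read/write plumbing is assumed and omitted), using the DPLL multiplier field defined above as an example:

static u32 set_dpll_mult(u32 clksel, u32 mult)
{
	/* clear the current multiplier field ... */
	clksel &= ~OMAP4430_DPLL_MULT_MASK;
	/* ... and insert the new value at the documented bit position */
	clksel |= (mult << OMAP4430_DPLL_MULT_SHIFT) & OMAP4430_DPLL_MULT_MASK;
	return clksel;
}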
diff --git a/arch/arm/mach-omap2/cm1_44xx.h b/arch/arm/mach-omap2/cm1_44xx.h
index e2d7a56b2ad6..1bc00dc4876c 100644
--- a/arch/arm/mach-omap2/cm1_44xx.h
+++ b/arch/arm/mach-omap2/cm1_44xx.h
@@ -1,7 +1,7 @@
/*
* OMAP44xx CM1 instance offset macros
*
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
* Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
@@ -41,9 +41,9 @@
#define OMAP4430_CM1_INSTR_INST 0x0f00
/* CM1 clockdomain register offsets (from instance start) */
-#define OMAP4430_CM1_ABE_ABE_CDOFFS 0x0000
-#define OMAP4430_CM1_MPU_MPU_CDOFFS 0x0000
-#define OMAP4430_CM1_TESLA_TESLA_CDOFFS 0x0000
+#define OMAP4430_CM1_MPU_MPU_CDOFFS 0x0000
+#define OMAP4430_CM1_TESLA_TESLA_CDOFFS 0x0000
+#define OMAP4430_CM1_ABE_ABE_CDOFFS 0x0000
/* CM1 */
@@ -82,8 +82,8 @@
#define OMAP4430_CM_DIV_M7_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0044)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET 0x0048
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0048)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_CORE_OFFSET 0x004c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x004c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET 0x004c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x004c)
#define OMAP4_CM_EMU_OVERRIDE_DPLL_CORE_OFFSET 0x0050
#define OMAP4430_CM_EMU_OVERRIDE_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0050)
#define OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET 0x0060
@@ -98,8 +98,8 @@
#define OMAP4430_CM_DIV_M2_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0070)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET 0x0088
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0088)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_MPU_OFFSET 0x008c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x008c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET 0x008c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x008c)
#define OMAP4_CM_BYPCLK_DPLL_MPU_OFFSET 0x009c
#define OMAP4430_CM_BYPCLK_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x009c)
#define OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET 0x00a0
@@ -116,8 +116,8 @@
#define OMAP4430_CM_DIV_M5_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00bc)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_IVA_OFFSET 0x00c8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00c8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_IVA_OFFSET 0x00cc
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00cc)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_IVA_OFFSET 0x00cc
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00cc)
#define OMAP4_CM_BYPCLK_DPLL_IVA_OFFSET 0x00dc
#define OMAP4430_CM_BYPCLK_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00dc)
#define OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET 0x00e0
@@ -134,8 +134,8 @@
#define OMAP4430_CM_DIV_M3_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00f4)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_ABE_OFFSET 0x0108
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0108)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_ABE_OFFSET 0x010c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x010c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_ABE_OFFSET 0x010c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x010c)
#define OMAP4_CM_CLKMODE_DPLL_DDRPHY_OFFSET 0x0120
#define OMAP4430_CM_CLKMODE_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0120)
#define OMAP4_CM_IDLEST_DPLL_DDRPHY_OFFSET 0x0124
@@ -154,8 +154,8 @@
#define OMAP4430_CM_DIV_M6_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0140)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_DDRPHY_OFFSET 0x0148
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0148)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_DDRPHY_OFFSET 0x014c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x014c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_DDRPHY_OFFSET 0x014c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x014c)
#define OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET 0x0160
#define OMAP4430_CM_SHADOW_FREQ_CONFIG1 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0160)
#define OMAP4_CM_SHADOW_FREQ_CONFIG2_OFFSET 0x0164
@@ -217,42 +217,6 @@
#define OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET 0x0088
#define OMAP4430_CM1_ABE_WDT3_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0088)
-/* CM1.RESTORE_CM1 register offsets */
-#define OMAP4_CM_CLKSEL_CORE_RESTORE_OFFSET 0x0000
-#define OMAP4430_CM_CLKSEL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0000)
-#define OMAP4_CM_DIV_M2_DPLL_CORE_RESTORE_OFFSET 0x0004
-#define OMAP4430_CM_DIV_M2_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0004)
-#define OMAP4_CM_DIV_M3_DPLL_CORE_RESTORE_OFFSET 0x0008
-#define OMAP4430_CM_DIV_M3_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0008)
-#define OMAP4_CM_DIV_M4_DPLL_CORE_RESTORE_OFFSET 0x000c
-#define OMAP4430_CM_DIV_M4_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x000c)
-#define OMAP4_CM_DIV_M5_DPLL_CORE_RESTORE_OFFSET 0x0010
-#define OMAP4430_CM_DIV_M5_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0010)
-#define OMAP4_CM_DIV_M6_DPLL_CORE_RESTORE_OFFSET 0x0014
-#define OMAP4430_CM_DIV_M6_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0014)
-#define OMAP4_CM_DIV_M7_DPLL_CORE_RESTORE_OFFSET 0x0018
-#define OMAP4430_CM_DIV_M7_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0018)
-#define OMAP4_CM_CLKSEL_DPLL_CORE_RESTORE_OFFSET 0x001c
-#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x001c)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE_OFFSET 0x0020
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0020)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_CORE_RESTORE_OFFSET 0x0024
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0024)
-#define OMAP4_CM_CLKMODE_DPLL_CORE_RESTORE_OFFSET 0x0028
-#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0028)
-#define OMAP4_CM_SHADOW_FREQ_CONFIG2_RESTORE_OFFSET 0x002c
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG2_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x002c)
-#define OMAP4_CM_SHADOW_FREQ_CONFIG1_RESTORE_OFFSET 0x0030
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG1_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0030)
-#define OMAP4_CM_AUTOIDLE_DPLL_CORE_RESTORE_OFFSET 0x0034
-#define OMAP4430_CM_AUTOIDLE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0034)
-#define OMAP4_CM_MPU_CLKSTCTRL_RESTORE_OFFSET 0x0038
-#define OMAP4430_CM_MPU_CLKSTCTRL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0038)
-#define OMAP4_CM_CM1_PROFILING_CLKCTRL_RESTORE_OFFSET 0x003c
-#define OMAP4430_CM_CM1_PROFILING_CLKCTRL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x003c)
-#define OMAP4_CM_DYN_DEP_PRESCAL_RESTORE_OFFSET 0x0040
-#define OMAP4430_CM_DYN_DEP_PRESCAL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0040)
-
/* Function prototypes */
extern u32 omap4_cm1_read_inst_reg(s16 inst, u16 idx);
extern void omap4_cm1_write_inst_reg(u32 val, s16 inst, u16 idx);
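
The instance and per-register offset macros above pair with these accessors. A hypothetical sketch (not taken from this patch) of requesting a shadow frequency update, assuming OMAP4430_FREQ_UPDATE_MASK from the cm-regbits header shown earlier and the accessor signatures declared just above:

static void cm1_request_freq_update(void)
{
	u32 v;

	/* read CM_SHADOW_FREQ_CONFIG1 from the CM1 CKGEN instance */
	v = omap4_cm1_read_inst_reg(OMAP4430_CM1_CKGEN_INST,
				    OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET);
	/* set FREQ_UPDATE to ask the PRCM to latch the shadow configuration */
	v |= OMAP4430_FREQ_UPDATE_MASK;
	omap4_cm1_write_inst_reg(v, OMAP4430_CM1_CKGEN_INST,
				 OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET);
}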
diff --git a/arch/arm/mach-omap2/cm2_44xx.h b/arch/arm/mach-omap2/cm2_44xx.h
index aa4745044065..b9de72da1a8e 100644
--- a/arch/arm/mach-omap2/cm2_44xx.h
+++ b/arch/arm/mach-omap2/cm2_44xx.h
@@ -1,7 +1,7 @@
/*
* OMAP44xx CM2 instance offset macros
*
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
* Copyright (C) 2009-2010 Nokia Corporation
*
* Paul Walmsley (paul@pwsan.com)
@@ -40,9 +40,9 @@
#define OMAP4430_CM2_CAM_INST 0x1000
#define OMAP4430_CM2_DSS_INST 0x1100
#define OMAP4430_CM2_GFX_INST 0x1200
-#define OMAP4430_CM2_L3INIT_INST 0x1300
+#define OMAP4430_CM2_L3INIT_INST 0x1300
#define OMAP4430_CM2_L4PER_INST 0x1400
-#define OMAP4430_CM2_CEFUSE_INST 0x1600
+#define OMAP4430_CM2_CEFUSE_INST 0x1600
#define OMAP4430_CM2_RESTORE_INST 0x1e00
#define OMAP4430_CM2_INSTR_INST 0x1f00
@@ -65,7 +65,6 @@
#define OMAP4430_CM2_L4PER_L4SEC_CDOFFS 0x0180
#define OMAP4430_CM2_CEFUSE_CEFUSE_CDOFFS 0x0000
-
/* CM2 */
/* CM2.OCP_SOCKET_CM2 register offsets */
@@ -121,8 +120,8 @@
#define OMAP4430_CM_DIV_M7_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0064)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET 0x0068
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0068)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_PER_OFFSET 0x006c
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x006c)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET 0x006c
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x006c)
#define OMAP4_CM_CLKMODE_DPLL_USB_OFFSET 0x0080
#define OMAP4430_CM_CLKMODE_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0080)
#define OMAP4_CM_IDLEST_DPLL_USB_OFFSET 0x0084
@@ -135,8 +134,8 @@
#define OMAP4430_CM_DIV_M2_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0090)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_USB_OFFSET 0x00a8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00a8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_USB_OFFSET 0x00ac
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ac)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_USB_OFFSET 0x00ac
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ac)
#define OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET 0x00b4
#define OMAP4430_CM_CLKDCOLDO_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00b4)
#define OMAP4_CM_CLKMODE_DPLL_UNIPRO_OFFSET 0x00c0
@@ -151,8 +150,8 @@
#define OMAP4430_CM_DIV_M2_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00d0)
#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_UNIPRO_OFFSET 0x00e8
#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00e8)
-#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_UNIPRO_OFFSET 0x00ec
-#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ec)
+#define OMAP4_CM_SSC_MODFREQDIV_DPLL_UNIPRO_OFFSET 0x00ec
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ec)
/* CM2.ALWAYS_ON_CM2 register offsets */
#define OMAP4_CM_ALWON_CLKSTCTRL_OFFSET 0x0000
@@ -227,8 +226,8 @@
#define OMAP4430_CM_D2D_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0508)
#define OMAP4_CM_D2D_SAD2D_CLKCTRL_OFFSET 0x0520
#define OMAP4430_CM_D2D_SAD2D_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0520)
-#define OMAP4_CM_D2D_INSTEM_ICR_CLKCTRL_OFFSET 0x0528
-#define OMAP4430_CM_D2D_INSTEM_ICR_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0528)
+#define OMAP4_CM_D2D_MODEM_ICR_CLKCTRL_OFFSET 0x0528
+#define OMAP4430_CM_D2D_MODEM_ICR_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0528)
#define OMAP4_CM_D2D_SAD2D_FW_CLKCTRL_OFFSET 0x0530
#define OMAP4430_CM_D2D_SAD2D_FW_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0530)
#define OMAP4_CM_L4CFG_CLKSTCTRL_OFFSET 0x0600
@@ -450,56 +449,6 @@
#define OMAP4_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET 0x0020
#define OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_INST, 0x0020)
-/* CM2.RESTORE_CM2 register offsets */
-#define OMAP4_CM_L3_1_CLKSTCTRL_RESTORE_OFFSET 0x0000
-#define OMAP4430_CM_L3_1_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0000)
-#define OMAP4_CM_L3_2_CLKSTCTRL_RESTORE_OFFSET 0x0004
-#define OMAP4430_CM_L3_2_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0004)
-#define OMAP4_CM_L4CFG_CLKSTCTRL_RESTORE_OFFSET 0x0008
-#define OMAP4430_CM_L4CFG_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0008)
-#define OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET 0x000c
-#define OMAP4430_CM_MEMIF_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x000c)
-#define OMAP4_CM_L4PER_CLKSTCTRL_RESTORE_OFFSET 0x0010
-#define OMAP4430_CM_L4PER_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0010)
-#define OMAP4_CM_L3INIT_CLKSTCTRL_RESTORE_OFFSET 0x0014
-#define OMAP4430_CM_L3INIT_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0014)
-#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_RESTORE_OFFSET 0x0018
-#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0018)
-#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE_OFFSET 0x001c
-#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x001c)
-#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE_OFFSET 0x0020
-#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0020)
-#define OMAP4_CM_CM2_PROFILING_CLKCTRL_RESTORE_OFFSET 0x0024
-#define OMAP4430_CM_CM2_PROFILING_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0024)
-#define OMAP4_CM_D2D_STATICDEP_RESTORE_OFFSET 0x0028
-#define OMAP4430_CM_D2D_STATICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0028)
-#define OMAP4_CM_L3_1_DYNAMICDEP_RESTORE_OFFSET 0x002c
-#define OMAP4430_CM_L3_1_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x002c)
-#define OMAP4_CM_L3_2_DYNAMICDEP_RESTORE_OFFSET 0x0030
-#define OMAP4430_CM_L3_2_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0030)
-#define OMAP4_CM_D2D_DYNAMICDEP_RESTORE_OFFSET 0x0034
-#define OMAP4430_CM_D2D_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0034)
-#define OMAP4_CM_L4CFG_DYNAMICDEP_RESTORE_OFFSET 0x0038
-#define OMAP4430_CM_L4CFG_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0038)
-#define OMAP4_CM_L4PER_DYNAMICDEP_RESTORE_OFFSET 0x003c
-#define OMAP4430_CM_L4PER_DYNAMICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x003c)
-#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_RESTORE_OFFSET 0x0040
-#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0040)
-#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_RESTORE_OFFSET 0x0044
-#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0044)
-#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_RESTORE_OFFSET 0x0048
-#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0048)
-#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_RESTORE_OFFSET 0x004c
-#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x004c)
-#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_RESTORE_OFFSET 0x0050
-#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0050)
-#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET 0x0054
-#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0054)
-#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET 0x0058
-#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0058)
-#define OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET 0x005c
-#define OMAP4430_CM_SDMA_STATICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x005c)
-
/* Function prototypes */
extern u32 omap4_cm2_read_inst_reg(s16 inst, u16 idx);
extern void omap4_cm2_write_inst_reg(u32 val, s16 inst, u16 idx);
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 94ccf464677b..bcb0c5817167 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -20,36 +20,15 @@
*
*/
-#include <linux/i2c.h>
-#include <linux/i2c/twl.h>
-
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
-#include <plat/i2c.h>
#include <plat/mcspi.h>
#include <plat/nand.h>
#include "common-board-devices.h"
-static struct i2c_board_info __initdata pmic_i2c_board_info = {
- .addr = 0x48,
- .flags = I2C_CLIENT_WAKE,
-};
-
-void __init omap_pmic_init(int bus, u32 clkrate,
- const char *pmic_type, int pmic_irq,
- struct twl4030_platform_data *pmic_data)
-{
- strncpy(pmic_i2c_board_info.type, pmic_type,
- sizeof(pmic_i2c_board_info.type));
- pmic_i2c_board_info.irq = pmic_irq;
- pmic_i2c_board_info.platform_data = pmic_data;
-
- omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
-}
-
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
static struct omap2_mcspi_device_config ads7846_mcspi_config = {
@@ -115,9 +94,7 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
#endif
#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
-static struct omap_nand_platform_data nand_data = {
- .dma_channel = -1, /* disable DMA in OMAP NAND driver */
-};
+static struct omap_nand_platform_data nand_data;
void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
int nr_parts)
@@ -148,7 +125,7 @@ void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
nand_data.cs = nandcs;
nand_data.parts = parts;
nand_data.nr_parts = nr_parts;
- nand_data.options = options;
+ nand_data.devsize = options;
printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
if (gpmc_nand_init(&nand_data) < 0)
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index 679719051df5..a0b4a42836ab 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -1,33 +1,11 @@
#ifndef __OMAP_COMMON_BOARD_DEVICES__
#define __OMAP_COMMON_BOARD_DEVICES__
+#include "twl-common.h"
+
#define NAND_BLOCK_SIZE SZ_128K
-struct twl4030_platform_data;
struct mtd_partition;
-
-void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
- struct twl4030_platform_data *pmic_data);
-
-static inline void omap2_pmic_init(const char *pmic_type,
- struct twl4030_platform_data *pmic_data)
-{
- omap_pmic_init(2, 2600, pmic_type, INT_24XX_SYS_NIRQ, pmic_data);
-}
-
-static inline void omap3_pmic_init(const char *pmic_type,
- struct twl4030_platform_data *pmic_data)
-{
- omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data);
-}
-
-static inline void omap4_pmic_init(const char *pmic_type,
- struct twl4030_platform_data *pmic_data)
-{
- /* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */
- omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data);
-}
-
struct ads7846_platform_data;
void omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
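For reference, the PMIC helpers dropped from this header are picked up through the new twl-common.h include above, and board files are expected to keep calling them with the same signatures. A minimal, illustrative call site under that assumption (board_twldata and example_board_init are placeholder names, not part of this series):

static struct twl4030_platform_data board_twldata = {
	.irq_base	= TWL4030_IRQ_BASE,
	.irq_end	= TWL4030_IRQ_END,
	/* regulator, GPIO and audio child data omitted for brevity */
};

static void __init example_board_init(void)
{
	/* registers the TWL4030 on I2C1 at 2600 kHz using the OMAP3 SYS_NIRQ line,
	 * matching the omap3_pmic_init() wrapper removed from this header */
	omap3_pmic_init("twl4030", &board_twldata);
}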
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index da53ba3917ca..aab884fecc55 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -286,14 +286,15 @@ void omap3_save_scratchpad_contents(void)
scratchpad_contents.boot_config_ptr = 0x0;
if (cpu_is_omap3630())
scratchpad_contents.public_restore_ptr =
- virt_to_phys(get_omap3630_restore_pointer());
+ virt_to_phys(omap3_restore_3630);
else if (omap_rev() != OMAP3430_REV_ES3_0 &&
omap_rev() != OMAP3430_REV_ES3_1)
scratchpad_contents.public_restore_ptr =
- virt_to_phys(get_restore_pointer());
+ virt_to_phys(omap3_restore);
else
scratchpad_contents.public_restore_ptr =
- virt_to_phys(get_es3_restore_pointer());
+ virt_to_phys(omap3_restore_es3);
+
if (omap_type() == OMAP2_DEVICE_TYPE_GP)
scratchpad_contents.secure_ram_restore_ptr = 0x0;
else
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index a016c8b59e00..d4ef75d5a382 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -386,9 +386,9 @@ extern void omap4_ctrl_pad_writel(u32 val, u16 offset);
extern void omap3_save_scratchpad_contents(void);
extern void omap3_clear_scratchpad_contents(void);
-extern u32 *get_restore_pointer(void);
-extern u32 *get_es3_restore_pointer(void);
-extern u32 *get_omap3630_restore_pointer(void);
+extern void omap3_restore(void);
+extern void omap3_restore_es3(void);
+extern void omap3_restore_3630(void);
extern u32 omap3_arm_context[128];
extern void omap3_control_save_context(void);
extern void omap3_control_restore_context(void);
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index c1791d08ae56..8ad210bda9a9 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -20,8 +20,6 @@
#include <plat/board.h>
#include <plat/gpmc.h>
-static struct omap_nand_platform_data *gpmc_nand_data;
-
static struct resource gpmc_nand_resource = {
.flags = IORESOURCE_MEM,
};
@@ -33,7 +31,7 @@ static struct platform_device gpmc_nand_device = {
.resource = &gpmc_nand_resource,
};
-static int omap2_nand_gpmc_retime(void)
+static int omap2_nand_gpmc_retime(struct omap_nand_platform_data *gpmc_nand_data)
{
struct gpmc_timings t;
int err;
@@ -83,13 +81,11 @@ static int omap2_nand_gpmc_retime(void)
return 0;
}
-int __init gpmc_nand_init(struct omap_nand_platform_data *_nand_data)
+int __init gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data)
{
int err = 0;
struct device *dev = &gpmc_nand_device.dev;
- gpmc_nand_data = _nand_data;
- gpmc_nand_data->nand_setup = omap2_nand_gpmc_retime;
gpmc_nand_device.dev.platform_data = gpmc_nand_data;
err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
@@ -100,7 +96,7 @@ int __init gpmc_nand_init(struct omap_nand_platform_data *_nand_data)
}
/* Set timings in GPMC */
- err = omap2_nand_gpmc_retime();
+ err = omap2_nand_gpmc_retime(gpmc_nand_data);
if (err < 0) {
dev_err(dev, "Unable to set gpmc timings: %d\n", err);
return err;
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index a48690b90990..ceb8b7e593d7 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -165,6 +165,3 @@
#endif
#endif /* MULTI_OMAP2 */
-
- .macro irq_prio_table
- .endm
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 441e79d043a7..2ce1ce6fb4db 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -333,23 +333,9 @@ static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
}
+/* See irq.c, omap4-common.c and entry-macro.S */
void __iomem *omap_irq_base;
-/*
- * Initialize asm_irq_base for entry-macro.S
- */
-static inline void omap_irq_base_init(void)
-{
- if (cpu_is_omap24xx())
- omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
- else if (cpu_is_omap34xx())
- omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE);
- else if (cpu_is_omap44xx())
- omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
- else
- pr_err("Could not initialize omap_irq_base\n");
-}
-
void __init omap2_init_common_infrastructure(void)
{
u8 postsetup_state;
@@ -422,7 +408,6 @@ void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
_omap2_init_reprogram_sdrc();
}
- omap_irq_base_init();
}
/*
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 3af2b7a1045e..3a12f7586a4c 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -141,25 +141,20 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
-void __init omap_init_irq(void)
+static void __init omap_init_irq(u32 base, int nr_irqs)
{
unsigned long nr_of_irqs = 0;
unsigned int nr_banks = 0;
int i, j;
+ omap_irq_base = ioremap(base, SZ_4K);
+ if (WARN_ON(!omap_irq_base))
+ return;
+
for (i = 0; i < ARRAY_SIZE(irq_banks); i++) {
- unsigned long base = 0;
struct omap_irq_bank *bank = irq_banks + i;
- if (cpu_is_omap24xx())
- base = OMAP24XX_IC_BASE;
- else if (cpu_is_omap34xx())
- base = OMAP34XX_IC_BASE;
-
- BUG_ON(!base);
-
- if (cpu_is_ti816x())
- bank->nr_irqs = 128;
+ bank->nr_irqs = nr_irqs;
/* Static mapping, never released */
bank->base_reg = ioremap(base, SZ_4K);
@@ -181,6 +176,21 @@ void __init omap_init_irq(void)
nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : "");
}
+void __init omap2_init_irq(void)
+{
+ omap_init_irq(OMAP24XX_IC_BASE, 96);
+}
+
+void __init omap3_init_irq(void)
+{
+ omap_init_irq(OMAP34XX_IC_BASE, 96);
+}
+
+void __init ti816x_init_irq(void)
+{
+ omap_init_irq(OMAP34XX_IC_BASE, 128);
+}
+
#ifdef CONFIG_ARCH_OMAP3
static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)];
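With the cpu_is_*() probing gone from the shared code above, each SoC family now gets its own init entry point that hard-codes the INTC base address and IRQ count. A rough sketch of how a machine descriptor would pick one of them up; the board name and init function are placeholders, and the remaining fields simply follow the usual OMAP3-era conventions rather than anything in this diff:

MACHINE_START(EXAMPLE_OMAP3_BOARD, "Example OMAP3 board")
	.boot_params	= 0x80000100,
	.map_io		= omap3_map_io,
	.init_irq	= omap3_init_irq,	/* formerly the generic omap_init_irq() */
	.init_machine	= example_board_init,
	.timer		= &omap_timer,
MACHINE_END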
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index ecfe93c4b585..ce65e9329c7b 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -125,14 +125,6 @@ void __init smp_init_cpus(void)
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
/*
* Initialise the SCU and wake up the secondary core using
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 9ef8c29dd817..35ac3e5f6e94 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -19,6 +19,8 @@
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
+#include <plat/irqs.h>
+
#include <mach/hardware.h>
#include <mach/omap4-common.h>
@@ -31,17 +33,15 @@ void __iomem *gic_dist_base_addr;
void __init gic_init_irq(void)
{
- void __iomem *gic_cpu_base;
-
/* Static mapping, never released */
gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
BUG_ON(!gic_dist_base_addr);
/* Static mapping, never released */
- gic_cpu_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
- BUG_ON(!gic_cpu_base);
+ omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
+ BUG_ON(!omap_irq_base);
- gic_init(0, 29, gic_dist_base_addr, gic_cpu_base);
+ gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
}
#ifdef CONFIG_CACHE_L2X0
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 293fa6cd50e1..7d242c9e2a2c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2,6 +2,7 @@
* omap_hwmod implementation for OMAP2/3/4
*
* Copyright (C) 2009-2011 Nokia Corporation
+ * Copyright (C) 2011 Texas Instruments, Inc.
*
* Paul Walmsley, Benoît Cousson, Kevin Hilman
*
@@ -387,11 +388,10 @@ static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle,
*/
static int _enable_wakeup(struct omap_hwmod *oh, u32 *v)
{
- u32 wakeup_mask;
-
if (!oh->class->sysc ||
!((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
- (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)))
+ (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ||
+ (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)))
return -EINVAL;
if (!oh->class->sysc->sysc_fields) {
@@ -399,12 +399,13 @@ static int _enable_wakeup(struct omap_hwmod *oh, u32 *v)
return -EINVAL;
}
- wakeup_mask = (0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
-
- *v |= wakeup_mask;
+ if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)
+ *v |= 0x1 << oh->class->sysc->sysc_fields->enwkup_shift;
if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
+ if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+ _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
/* XXX test pwrdm_get_wken for this hwmod's subsystem */
@@ -422,11 +423,10 @@ static int _enable_wakeup(struct omap_hwmod *oh, u32 *v)
*/
static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
{
- u32 wakeup_mask;
-
if (!oh->class->sysc ||
!((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
- (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)))
+ (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) ||
+ (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)))
return -EINVAL;
if (!oh->class->sysc->sysc_fields) {
@@ -434,12 +434,13 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
return -EINVAL;
}
- wakeup_mask = (0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
-
- *v &= ~wakeup_mask;
+ if (oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP)
+ *v &= ~(0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
+ if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+ _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
/* XXX test pwrdm_get_wken for this hwmod's subsystem */
@@ -678,6 +679,75 @@ static void _disable_optional_clocks(struct omap_hwmod *oh)
}
/**
+ * _count_mpu_irqs - count the number of MPU IRQ lines associated with @oh
+ * @oh: struct omap_hwmod *oh
+ *
+ * Count and return the number of MPU IRQs associated with the hwmod
+ * @oh. Used to allocate struct resource data. Returns 0 if @oh is
+ * NULL.
+ */
+static int _count_mpu_irqs(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_irq_info *ohii;
+ int i = 0;
+
+ if (!oh || !oh->mpu_irqs)
+ return 0;
+
+ do {
+ ohii = &oh->mpu_irqs[i++];
+ } while (ohii->irq != -1);
+
+ return i;
+}
+
+/**
+ * _count_sdma_reqs - count the number of SDMA request lines associated with @oh
+ * @oh: struct omap_hwmod *oh
+ *
+ * Count and return the number of SDMA request lines associated with
+ * the hwmod @oh. Used to allocate struct resource data. Returns 0
+ * if @oh is NULL.
+ */
+static int _count_sdma_reqs(struct omap_hwmod *oh)
+{
+ struct omap_hwmod_dma_info *ohdi;
+ int i = 0;
+
+ if (!oh || !oh->sdma_reqs)
+ return 0;
+
+ do {
+ ohdi = &oh->sdma_reqs[i++];
+ } while (ohdi->dma_req != -1);
+
+ return i;
+}
+
+/**
+ * _count_ocp_if_addr_spaces - count the number of address space entries for @os
+ * @os: struct omap_hwmod_ocp_if *os
+ *
+ * Count and return the number of address space ranges associated with
+ * the OCP interface @os.  Used to allocate struct resource data.  Returns
+ * 0 if @os is NULL.

+ */
+static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
+{
+ struct omap_hwmod_addr_space *mem;
+ int i = 0;
+
+ if (!os || !os->addr)
+ return 0;
+
+ do {
+ mem = &os->addr[i++];
+ } while (mem->pa_start != mem->pa_end);
+
+ return i;
+}
+
+/**
* _find_mpu_port_index - find hwmod OCP slave port ID intended for MPU use
* @oh: struct omap_hwmod *
*
@@ -722,8 +792,7 @@ static void __iomem * __init _find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
{
struct omap_hwmod_ocp_if *os;
struct omap_hwmod_addr_space *mem;
- int i;
- int found = 0;
+ int i = 0, found = 0;
void __iomem *va_start;
if (!oh || oh->slaves_cnt == 0)
@@ -731,12 +800,14 @@ static void __iomem * __init _find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
os = oh->slaves[index];
- for (i = 0, mem = os->addr; i < os->addr_cnt; i++, mem++) {
- if (mem->flags & ADDR_TYPE_RT) {
+ if (!os->addr)
+ return NULL;
+
+ do {
+ mem = &os->addr[i++];
+ if (mem->flags & ADDR_TYPE_RT)
found = 1;
- break;
- }
- }
+ } while (!found && mem->pa_start != mem->pa_end);
if (found) {
va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
@@ -781,8 +852,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
}
if (sf & SYSC_HAS_MIDLEMODE) {
- idlemode = (oh->flags & HWMOD_SWSUP_MSTANDBY) ?
- HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+ if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ idlemode = HWMOD_IDLEMODE_NO;
+ } else {
+ if (sf & SYSC_HAS_ENAWAKEUP)
+ _enable_wakeup(oh, &v);
+ if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+ idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
+ }
_set_master_standbymode(oh, idlemode, &v);
}
@@ -840,8 +919,16 @@ static void _idle_sysc(struct omap_hwmod *oh)
}
if (sf & SYSC_HAS_MIDLEMODE) {
- idlemode = (oh->flags & HWMOD_SWSUP_MSTANDBY) ?
- HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+ if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ } else {
+ if (sf & SYSC_HAS_ENAWAKEUP)
+ _enable_wakeup(oh, &v);
+ if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
+ idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
+ }
_set_master_standbymode(oh, idlemode, &v);
}
@@ -928,6 +1015,8 @@ static int _init_clocks(struct omap_hwmod *oh, void *data)
if (!ret)
oh->_state = _HWMOD_STATE_CLKS_INITED;
+ else
+ pr_warning("omap_hwmod: %s: cannot _init_clocks\n", oh->name);
return ret;
}
@@ -1224,6 +1313,8 @@ static int _enable(struct omap_hwmod *oh)
{
int r;
+ pr_debug("omap_hwmod: %s: enabling\n", oh->name);
+
if (oh->_state != _HWMOD_STATE_INITIALIZED &&
oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_DISABLED) {
@@ -1232,17 +1323,6 @@ static int _enable(struct omap_hwmod *oh)
return -EINVAL;
}
- pr_debug("omap_hwmod: %s: enabling\n", oh->name);
-
- /*
- * If an IP contains only one HW reset line, then de-assert it in order
- * to allow to enable the clocks. Otherwise the PRCM will return
- * Intransition status, and the init will failed.
- */
- if ((oh->_state == _HWMOD_STATE_INITIALIZED ||
- oh->_state == _HWMOD_STATE_DISABLED) && oh->rst_lines_cnt == 1)
- _deassert_hardreset(oh, oh->rst_lines[0].name);
-
/* Mux pins for device runtime if populated */
if (oh->mux && (!oh->mux->enabled ||
((oh->_state == _HWMOD_STATE_IDLE) &&
@@ -1252,20 +1332,31 @@ static int _enable(struct omap_hwmod *oh)
_add_initiator_dep(oh, mpu_oh);
_enable_clocks(oh);
- r = _wait_target_ready(oh);
- if (!r) {
- oh->_state = _HWMOD_STATE_ENABLED;
+ /*
+ * If an IP contains only one HW reset line, then de-assert it in order
+ * to allow the module state transition. Otherwise the PRCM will return
+ * Intransition status, and the init will fail.
+ */
+ if ((oh->_state == _HWMOD_STATE_INITIALIZED ||
+ oh->_state == _HWMOD_STATE_DISABLED) && oh->rst_lines_cnt == 1)
+ _deassert_hardreset(oh, oh->rst_lines[0].name);
- /* Access the sysconfig only if the target is ready */
- if (oh->class->sysc) {
- if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
- _update_sysc_cache(oh);
- _enable_sysc(oh);
- }
- } else {
- _disable_clocks(oh);
+ r = _wait_target_ready(oh);
+ if (r) {
pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
oh->name, r);
+ _disable_clocks(oh);
+
+ return r;
+ }
+
+ oh->_state = _HWMOD_STATE_ENABLED;
+
+ /* Access the sysconfig only if the target is ready */
+ if (oh->class->sysc) {
+ if (!(oh->_int_flags & _HWMOD_SYSCONFIG_LOADED))
+ _update_sysc_cache(oh);
+ _enable_sysc(oh);
}
return r;
@@ -1281,14 +1372,14 @@ static int _enable(struct omap_hwmod *oh)
*/
static int _idle(struct omap_hwmod *oh)
{
+ pr_debug("omap_hwmod: %s: idling\n", oh->name);
+
if (oh->_state != _HWMOD_STATE_ENABLED) {
WARN(1, "omap_hwmod: %s: idle state can only be entered from "
"enabled state\n", oh->name);
return -EINVAL;
}
- pr_debug("omap_hwmod: %s: idling\n", oh->name);
-
if (oh->class->sysc)
_idle_sysc(oh);
_del_initiator_dep(oh, mpu_oh);
@@ -1374,15 +1465,11 @@ static int _shutdown(struct omap_hwmod *oh)
}
}
- if (oh->class->sysc)
+ if (oh->class->sysc) {
+ if (oh->_state == _HWMOD_STATE_IDLE)
+ _enable(oh);
_shutdown_sysc(oh);
-
- /*
- * If an IP contains only one HW reset line, then assert it
- * before disabling the clocks and shutting down the IP.
- */
- if (oh->rst_lines_cnt == 1)
- _assert_hardreset(oh, oh->rst_lines[0].name);
+ }
/* clocks and deps are already disabled in idle */
if (oh->_state == _HWMOD_STATE_ENABLED) {
@@ -1392,6 +1479,13 @@ static int _shutdown(struct omap_hwmod *oh)
}
/* XXX Should this code also force-disable the optional clocks? */
+ /*
+ * If an IP contains only one HW reset line, then assert it
+ * after disabling the clocks and before shutting down the IP.
+ */
+ if (oh->rst_lines_cnt == 1)
+ _assert_hardreset(oh, oh->rst_lines[0].name);
+
/* Mux pins to safe mode or use populated off mode values */
if (oh->mux)
omap_hwmod_mux(oh->mux, _HWMOD_STATE_DISABLED);
@@ -1685,9 +1779,6 @@ static int __init _populate_mpu_rt_base(struct omap_hwmod *oh, void *data)
return 0;
oh->_mpu_rt_va = _find_mpu_rt_base(oh, oh->_mpu_port_index);
- if (!oh->_mpu_rt_va)
- pr_warning("omap_hwmod: %s found no _mpu_rt_va for %s\n",
- __func__, oh->name);
return 0;
}
@@ -1939,10 +2030,10 @@ int omap_hwmod_count_resources(struct omap_hwmod *oh)
{
int ret, i;
- ret = oh->mpu_irqs_cnt + oh->sdma_reqs_cnt;
+ ret = _count_mpu_irqs(oh) + _count_sdma_reqs(oh);
for (i = 0; i < oh->slaves_cnt; i++)
- ret += oh->slaves[i]->addr_cnt;
+ ret += _count_ocp_if_addr_spaces(oh->slaves[i]);
return ret;
}
@@ -1959,12 +2050,13 @@ int omap_hwmod_count_resources(struct omap_hwmod *oh)
*/
int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
{
- int i, j;
+ int i, j, mpu_irqs_cnt, sdma_reqs_cnt;
int r = 0;
/* For each IRQ, DMA, memory area, fill in array.*/
- for (i = 0; i < oh->mpu_irqs_cnt; i++) {
+ mpu_irqs_cnt = _count_mpu_irqs(oh);
+ for (i = 0; i < mpu_irqs_cnt; i++) {
(res + r)->name = (oh->mpu_irqs + i)->name;
(res + r)->start = (oh->mpu_irqs + i)->irq;
(res + r)->end = (oh->mpu_irqs + i)->irq;
@@ -1972,7 +2064,8 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
r++;
}
- for (i = 0; i < oh->sdma_reqs_cnt; i++) {
+ sdma_reqs_cnt = _count_sdma_reqs(oh);
+ for (i = 0; i < sdma_reqs_cnt; i++) {
(res + r)->name = (oh->sdma_reqs + i)->name;
(res + r)->start = (oh->sdma_reqs + i)->dma_req;
(res + r)->end = (oh->sdma_reqs + i)->dma_req;
@@ -1982,10 +2075,12 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
for (i = 0; i < oh->slaves_cnt; i++) {
struct omap_hwmod_ocp_if *os;
+ int addr_cnt;
os = oh->slaves[i];
+ addr_cnt = _count_ocp_if_addr_spaces(os);
- for (j = 0; j < os->addr_cnt; j++) {
+ for (j = 0; j < addr_cnt; j++) {
(res + r)->name = (os->addr + j)->name;
(res + r)->start = (os->addr + j)->pa_start;
(res + r)->end = (os->addr + j)->pa_end;
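The _count_*() helpers introduced above, together with the terminator entries added to the data files that follow, replace the per-array *_cnt fields with sentinel-terminated arrays: an all-zero { } entry ends an address-space list, while the helpers test for .irq / .dma_req == -1 to end the IRQ and DMA lists. A self-contained sketch of the convention in plain C, with illustrative types and values that are not the kernel's own:

#include <stdio.h>

struct irq_info {
	int irq;
};

/* Array terminated by a sentinel entry instead of carrying a separate count. */
static struct irq_info timer1_irqs[] = {
	{ .irq = 37 },
	{ .irq = -1 },		/* sentinel terminator */
};

static int count_irqs(const struct irq_info *irqs)
{
	int i = 0;

	if (!irqs)
		return 0;

	while (irqs[i].irq != -1)	/* walk until the sentinel */
		i++;

	return i;
}

int main(void)
{
	printf("%d IRQ resource(s)\n", count_irqs(timer1_irqs));	/* prints 1 */
	return 0;
}

Walking to a sentinel trades a small scan at init time for static data that no longer needs a matching count field, which is what lets the per-SoC hwmod data files below drop their private arrays in favour of shared omap2_* ones.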
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index c4d0ae87d62a..f3901abf2c28 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -1,7 +1,7 @@
/*
* omap_hwmod_2420_data.c - hardware modules present on the OMAP2420 chips
*
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009-2011 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
@@ -114,38 +114,20 @@ static struct omap_hwmod omap2420_mcbsp1_hwmod;
static struct omap_hwmod omap2420_mcbsp2_hwmod;
/* l4 core -> mcspi1 interface */
-static struct omap_hwmod_addr_space omap2420_mcspi1_addr_space[] = {
- {
- .pa_start = 0x48098000,
- .pa_end = 0x480980ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi1 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_mcspi1_hwmod,
.clk = "mcspi1_ick",
- .addr = omap2420_mcspi1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_mcspi1_addr_space),
+ .addr = omap2_mcspi1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4 core -> mcspi2 interface */
-static struct omap_hwmod_addr_space omap2420_mcspi2_addr_space[] = {
- {
- .pa_start = 0x4809a000,
- .pa_end = 0x4809a0ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2420_l4_core__mcspi2 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_mcspi2_hwmod,
.clk = "mcspi2_ick",
- .addr = omap2420_mcspi2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_mcspi2_addr_space),
+ .addr = omap2_mcspi2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -157,95 +139,47 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = {
};
/* L4 CORE -> UART1 interface */
-static struct omap_hwmod_addr_space omap2420_uart1_addr_space[] = {
- {
- .pa_start = OMAP2_UART1_BASE,
- .pa_end = OMAP2_UART1_BASE + SZ_8K - 1,
- .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2_l4_core__uart1 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_uart1_hwmod,
.clk = "uart1_ick",
- .addr = omap2420_uart1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_uart1_addr_space),
+ .addr = omap2xxx_uart1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> UART2 interface */
-static struct omap_hwmod_addr_space omap2420_uart2_addr_space[] = {
- {
- .pa_start = OMAP2_UART2_BASE,
- .pa_end = OMAP2_UART2_BASE + SZ_1K - 1,
- .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2_l4_core__uart2 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_uart2_hwmod,
.clk = "uart2_ick",
- .addr = omap2420_uart2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_uart2_addr_space),
+ .addr = omap2xxx_uart2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 PER -> UART3 interface */
-static struct omap_hwmod_addr_space omap2420_uart3_addr_space[] = {
- {
- .pa_start = OMAP2_UART3_BASE,
- .pa_end = OMAP2_UART3_BASE + SZ_1K - 1,
- .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_uart3_hwmod,
.clk = "uart3_ick",
- .addr = omap2420_uart3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_uart3_addr_space),
+ .addr = omap2xxx_uart3_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* I2C IP block address space length (in bytes) */
-#define OMAP2_I2C_AS_LEN 128
-
/* L4 CORE -> I2C1 interface */
-static struct omap_hwmod_addr_space omap2420_i2c1_addr_space[] = {
- {
- .pa_start = 0x48070000,
- .pa_end = 0x48070000 + OMAP2_I2C_AS_LEN - 1,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2420_l4_core__i2c1 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_i2c1_hwmod,
.clk = "i2c1_ick",
- .addr = omap2420_i2c1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_i2c1_addr_space),
+ .addr = omap2_i2c1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> I2C2 interface */
-static struct omap_hwmod_addr_space omap2420_i2c2_addr_space[] = {
- {
- .pa_start = 0x48072000,
- .pa_end = 0x48072000 + OMAP2_I2C_AS_LEN - 1,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2420_l4_core__i2c2 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_i2c2_hwmod,
.clk = "i2c2_ick",
- .addr = omap2420_i2c2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_i2c2_addr_space),
+ .addr = omap2_i2c2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -340,29 +274,8 @@ static struct omap_hwmod omap2420_iva_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
-/* Timer Common */
-static struct omap_hwmod_class_sysconfig omap2420_timer_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_timer_hwmod_class = {
- .name = "timer",
- .sysc = &omap2420_timer_sysc,
- .rev = OMAP_TIMER_IP_VERSION_1,
-};
-
/* timer1 */
static struct omap_hwmod omap2420_timer1_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer1_mpu_irqs[] = {
- { .irq = 37, },
-};
static struct omap_hwmod_addr_space omap2420_timer1_addrs[] = {
{
@@ -370,6 +283,7 @@ static struct omap_hwmod_addr_space omap2420_timer1_addrs[] = {
.pa_end = 0x48028000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_wkup -> timer1 */
@@ -378,7 +292,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__timer1 = {
.slave = &omap2420_timer1_hwmod,
.clk = "gpt1_ick",
.addr = omap2420_timer1_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -390,8 +303,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer1_slaves[] = {
/* timer1 hwmod */
static struct omap_hwmod omap2420_timer1_hwmod = {
.name = "timer1",
- .mpu_irqs = omap2420_timer1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer1_mpu_irqs),
+ .mpu_irqs = omap2_timer1_mpu_irqs,
.main_clk = "gpt1_fck",
.prcm = {
.omap2 = {
@@ -404,31 +316,19 @@ static struct omap_hwmod omap2420_timer1_hwmod = {
},
.slaves = omap2420_timer1_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer1_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer2 */
static struct omap_hwmod omap2420_timer2_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer2_mpu_irqs[] = {
- { .irq = 38, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer2_addrs[] = {
- {
- .pa_start = 0x4802a000,
- .pa_end = 0x4802a000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer2 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer2 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer2_hwmod,
.clk = "gpt2_ick",
- .addr = omap2420_timer2_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer2_addrs),
+ .addr = omap2xxx_timer2_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -440,8 +340,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer2_slaves[] = {
/* timer2 hwmod */
static struct omap_hwmod omap2420_timer2_hwmod = {
.name = "timer2",
- .mpu_irqs = omap2420_timer2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer2_mpu_irqs),
+ .mpu_irqs = omap2_timer2_mpu_irqs,
.main_clk = "gpt2_fck",
.prcm = {
.omap2 = {
@@ -454,31 +353,19 @@ static struct omap_hwmod omap2420_timer2_hwmod = {
},
.slaves = omap2420_timer2_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer2_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer3 */
static struct omap_hwmod omap2420_timer3_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer3_mpu_irqs[] = {
- { .irq = 39, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer3_addrs[] = {
- {
- .pa_start = 0x48078000,
- .pa_end = 0x48078000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer3 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer3 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer3_hwmod,
.clk = "gpt3_ick",
- .addr = omap2420_timer3_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer3_addrs),
+ .addr = omap2xxx_timer3_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -490,8 +377,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer3_slaves[] = {
/* timer3 hwmod */
static struct omap_hwmod omap2420_timer3_hwmod = {
.name = "timer3",
- .mpu_irqs = omap2420_timer3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer3_mpu_irqs),
+ .mpu_irqs = omap2_timer3_mpu_irqs,
.main_clk = "gpt3_fck",
.prcm = {
.omap2 = {
@@ -504,31 +390,19 @@ static struct omap_hwmod omap2420_timer3_hwmod = {
},
.slaves = omap2420_timer3_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer3_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer4 */
static struct omap_hwmod omap2420_timer4_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer4_mpu_irqs[] = {
- { .irq = 40, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer4_addrs[] = {
- {
- .pa_start = 0x4807a000,
- .pa_end = 0x4807a000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer4 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer4 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer4_hwmod,
.clk = "gpt4_ick",
- .addr = omap2420_timer4_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer4_addrs),
+ .addr = omap2xxx_timer4_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -540,8 +414,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer4_slaves[] = {
/* timer4 hwmod */
static struct omap_hwmod omap2420_timer4_hwmod = {
.name = "timer4",
- .mpu_irqs = omap2420_timer4_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer4_mpu_irqs),
+ .mpu_irqs = omap2_timer4_mpu_irqs,
.main_clk = "gpt4_fck",
.prcm = {
.omap2 = {
@@ -554,31 +427,19 @@ static struct omap_hwmod omap2420_timer4_hwmod = {
},
.slaves = omap2420_timer4_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer4_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer5 */
static struct omap_hwmod omap2420_timer5_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer5_mpu_irqs[] = {
- { .irq = 41, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer5_addrs[] = {
- {
- .pa_start = 0x4807c000,
- .pa_end = 0x4807c000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer5 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer5 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer5_hwmod,
.clk = "gpt5_ick",
- .addr = omap2420_timer5_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer5_addrs),
+ .addr = omap2xxx_timer5_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -590,8 +451,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer5_slaves[] = {
/* timer5 hwmod */
static struct omap_hwmod omap2420_timer5_hwmod = {
.name = "timer5",
- .mpu_irqs = omap2420_timer5_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer5_mpu_irqs),
+ .mpu_irqs = omap2_timer5_mpu_irqs,
.main_clk = "gpt5_fck",
.prcm = {
.omap2 = {
@@ -604,32 +464,20 @@ static struct omap_hwmod omap2420_timer5_hwmod = {
},
.slaves = omap2420_timer5_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer5_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer6 */
static struct omap_hwmod omap2420_timer6_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer6_mpu_irqs[] = {
- { .irq = 42, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer6_addrs[] = {
- {
- .pa_start = 0x4807e000,
- .pa_end = 0x4807e000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer6 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer6 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer6_hwmod,
.clk = "gpt6_ick",
- .addr = omap2420_timer6_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer6_addrs),
+ .addr = omap2xxx_timer6_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -641,8 +489,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer6_slaves[] = {
/* timer6 hwmod */
static struct omap_hwmod omap2420_timer6_hwmod = {
.name = "timer6",
- .mpu_irqs = omap2420_timer6_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer6_mpu_irqs),
+ .mpu_irqs = omap2_timer6_mpu_irqs,
.main_clk = "gpt6_fck",
.prcm = {
.omap2 = {
@@ -655,31 +502,19 @@ static struct omap_hwmod omap2420_timer6_hwmod = {
},
.slaves = omap2420_timer6_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer6_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer7 */
static struct omap_hwmod omap2420_timer7_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer7_mpu_irqs[] = {
- { .irq = 43, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer7_addrs[] = {
- {
- .pa_start = 0x48080000,
- .pa_end = 0x48080000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer7 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer7 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer7_hwmod,
.clk = "gpt7_ick",
- .addr = omap2420_timer7_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer7_addrs),
+ .addr = omap2xxx_timer7_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -691,8 +526,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer7_slaves[] = {
/* timer7 hwmod */
static struct omap_hwmod omap2420_timer7_hwmod = {
.name = "timer7",
- .mpu_irqs = omap2420_timer7_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer7_mpu_irqs),
+ .mpu_irqs = omap2_timer7_mpu_irqs,
.main_clk = "gpt7_fck",
.prcm = {
.omap2 = {
@@ -705,31 +539,19 @@ static struct omap_hwmod omap2420_timer7_hwmod = {
},
.slaves = omap2420_timer7_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer7_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer8 */
static struct omap_hwmod omap2420_timer8_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer8_mpu_irqs[] = {
- { .irq = 44, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer8_addrs[] = {
- {
- .pa_start = 0x48082000,
- .pa_end = 0x48082000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer8 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer8 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer8_hwmod,
.clk = "gpt8_ick",
- .addr = omap2420_timer8_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer8_addrs),
+ .addr = omap2xxx_timer8_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -741,8 +563,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer8_slaves[] = {
/* timer8 hwmod */
static struct omap_hwmod omap2420_timer8_hwmod = {
.name = "timer8",
- .mpu_irqs = omap2420_timer8_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer8_mpu_irqs),
+ .mpu_irqs = omap2_timer8_mpu_irqs,
.main_clk = "gpt8_fck",
.prcm = {
.omap2 = {
@@ -755,31 +576,19 @@ static struct omap_hwmod omap2420_timer8_hwmod = {
},
.slaves = omap2420_timer8_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer8_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer9 */
static struct omap_hwmod omap2420_timer9_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer9_mpu_irqs[] = {
- { .irq = 45, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer9_addrs[] = {
- {
- .pa_start = 0x48084000,
- .pa_end = 0x48084000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer9 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer9 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer9_hwmod,
.clk = "gpt9_ick",
- .addr = omap2420_timer9_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer9_addrs),
+ .addr = omap2xxx_timer9_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -791,8 +600,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer9_slaves[] = {
/* timer9 hwmod */
static struct omap_hwmod omap2420_timer9_hwmod = {
.name = "timer9",
- .mpu_irqs = omap2420_timer9_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer9_mpu_irqs),
+ .mpu_irqs = omap2_timer9_mpu_irqs,
.main_clk = "gpt9_fck",
.prcm = {
.omap2 = {
@@ -805,31 +613,19 @@ static struct omap_hwmod omap2420_timer9_hwmod = {
},
.slaves = omap2420_timer9_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer9_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer10 */
static struct omap_hwmod omap2420_timer10_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer10_mpu_irqs[] = {
- { .irq = 46, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer10_addrs[] = {
- {
- .pa_start = 0x48086000,
- .pa_end = 0x48086000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer10 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer10 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer10_hwmod,
.clk = "gpt10_ick",
- .addr = omap2420_timer10_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer10_addrs),
+ .addr = omap2_timer10_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -841,8 +637,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer10_slaves[] = {
/* timer10 hwmod */
static struct omap_hwmod omap2420_timer10_hwmod = {
.name = "timer10",
- .mpu_irqs = omap2420_timer10_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer10_mpu_irqs),
+ .mpu_irqs = omap2_timer10_mpu_irqs,
.main_clk = "gpt10_fck",
.prcm = {
.omap2 = {
@@ -855,31 +650,19 @@ static struct omap_hwmod omap2420_timer10_hwmod = {
},
.slaves = omap2420_timer10_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer10_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer11 */
static struct omap_hwmod omap2420_timer11_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer11_mpu_irqs[] = {
- { .irq = 47, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer11_addrs[] = {
- {
- .pa_start = 0x48088000,
- .pa_end = 0x48088000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer11 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer11 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer11_hwmod,
.clk = "gpt11_ick",
- .addr = omap2420_timer11_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer11_addrs),
+ .addr = omap2_timer11_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -891,8 +674,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer11_slaves[] = {
/* timer11 hwmod */
static struct omap_hwmod omap2420_timer11_hwmod = {
.name = "timer11",
- .mpu_irqs = omap2420_timer11_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer11_mpu_irqs),
+ .mpu_irqs = omap2_timer11_mpu_irqs,
.main_clk = "gpt11_fck",
.prcm = {
.omap2 = {
@@ -905,31 +687,19 @@ static struct omap_hwmod omap2420_timer11_hwmod = {
},
.slaves = omap2420_timer11_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer11_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
/* timer12 */
static struct omap_hwmod omap2420_timer12_hwmod;
-static struct omap_hwmod_irq_info omap2420_timer12_mpu_irqs[] = {
- { .irq = 48, },
-};
-
-static struct omap_hwmod_addr_space omap2420_timer12_addrs[] = {
- {
- .pa_start = 0x4808a000,
- .pa_end = 0x4808a000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer12 */
static struct omap_hwmod_ocp_if omap2420_l4_core__timer12 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_timer12_hwmod,
.clk = "gpt12_ick",
- .addr = omap2420_timer12_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_timer12_addrs),
+ .addr = omap2xxx_timer12_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -941,8 +711,7 @@ static struct omap_hwmod_ocp_if *omap2420_timer12_slaves[] = {
/* timer12 hwmod */
static struct omap_hwmod omap2420_timer12_hwmod = {
.name = "timer12",
- .mpu_irqs = omap2420_timer12_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_timer12_mpu_irqs),
+ .mpu_irqs = omap2xxx_timer12_mpu_irqs,
.main_clk = "gpt12_fck",
.prcm = {
.omap2 = {
@@ -955,7 +724,7 @@ static struct omap_hwmod omap2420_timer12_hwmod = {
},
.slaves = omap2420_timer12_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_timer12_slaves),
- .class = &omap2420_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420)
};
@@ -966,6 +735,7 @@ static struct omap_hwmod_addr_space omap2420_wd_timer2_addrs[] = {
.pa_end = 0x4802207f,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2420_l4_wkup__wd_timer2 = {
@@ -973,31 +743,9 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__wd_timer2 = {
.slave = &omap2420_wd_timer2_hwmod,
.clk = "mpu_wdt_ick",
.addr = omap2420_wd_timer2_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_wd_timer2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/*
- * 'wd_timer' class
- * 32-bit watchdog upward counter that generates a pulse on the reset pin on
- * overflow condition
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_wd_timer_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_wd_timer_hwmod_class = {
- .name = "wd_timer",
- .sysc = &omap2420_wd_timer_sysc,
- .pre_shutdown = &omap2_wd_timer_disable
-};
-
/* wd_timer2 */
static struct omap_hwmod_ocp_if *omap2420_wd_timer2_slaves[] = {
&omap2420_l4_wkup__wd_timer2,
@@ -1005,7 +753,7 @@ static struct omap_hwmod_ocp_if *omap2420_wd_timer2_slaves[] = {
static struct omap_hwmod omap2420_wd_timer2_hwmod = {
.name = "wd_timer2",
- .class = &omap2420_wd_timer_hwmod_class,
+ .class = &omap2xxx_wd_timer_hwmod_class,
.main_clk = "mpu_wdt_fck",
.prcm = {
.omap2 = {
@@ -1021,45 +769,16 @@ static struct omap_hwmod omap2420_wd_timer2_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
-/* UART */
-
-static struct omap_hwmod_class_sysconfig uart_sysc = {
- .rev_offs = 0x50,
- .sysc_offs = 0x54,
- .syss_offs = 0x58,
- .sysc_flags = (SYSC_HAS_SIDLEMODE |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class uart_class = {
- .name = "uart",
- .sysc = &uart_sysc,
-};
-
/* UART1 */
-static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
- { .irq = INT_24XX_UART1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
-};
-
static struct omap_hwmod_ocp_if *omap2420_uart1_slaves[] = {
&omap2_l4_core__uart1,
};
static struct omap_hwmod omap2420_uart1_hwmod = {
.name = "uart1",
- .mpu_irqs = uart1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart1_mpu_irqs),
- .sdma_reqs = uart1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart1_sdma_reqs),
+ .mpu_irqs = omap2_uart1_mpu_irqs,
+ .sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
.prcm = {
.omap2 = {
@@ -1072,31 +791,20 @@ static struct omap_hwmod omap2420_uart1_hwmod = {
},
.slaves = omap2420_uart1_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_uart1_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
/* UART2 */
-static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
- { .irq = INT_24XX_UART2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
-};
-
static struct omap_hwmod_ocp_if *omap2420_uart2_slaves[] = {
&omap2_l4_core__uart2,
};
static struct omap_hwmod omap2420_uart2_hwmod = {
.name = "uart2",
- .mpu_irqs = uart2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart2_mpu_irqs),
- .sdma_reqs = uart2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart2_sdma_reqs),
+ .mpu_irqs = omap2_uart2_mpu_irqs,
+ .sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
.prcm = {
.omap2 = {
@@ -1109,31 +817,20 @@ static struct omap_hwmod omap2420_uart2_hwmod = {
},
.slaves = omap2420_uart2_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_uart2_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
/* UART3 */
-static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
- { .irq = INT_24XX_UART3_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
-};
-
static struct omap_hwmod_ocp_if *omap2420_uart3_slaves[] = {
&omap2_l4_core__uart3,
};
static struct omap_hwmod omap2420_uart3_hwmod = {
.name = "uart3",
- .mpu_irqs = uart3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart3_mpu_irqs),
- .sdma_reqs = uart3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart3_sdma_reqs),
+ .mpu_irqs = omap2_uart3_mpu_irqs,
+ .sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
.prcm = {
.omap2 = {
@@ -1146,53 +843,22 @@ static struct omap_hwmod omap2420_uart3_hwmod = {
},
.slaves = omap2420_uart3_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_uart3_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
-/*
- * 'dss' class
- * display sub-system
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_dss_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_dss_hwmod_class = {
- .name = "dss",
- .sysc = &omap2420_dss_sysc,
-};
-
-static struct omap_hwmod_dma_info omap2420_dss_sdma_chs[] = {
- { .name = "dispc", .dma_req = 5 },
-};
-
/* dss */
/* dss master ports */
static struct omap_hwmod_ocp_if *omap2420_dss_masters[] = {
&omap2420_dss__l3,
};
-static struct omap_hwmod_addr_space omap2420_dss_addrs[] = {
- {
- .pa_start = 0x48050000,
- .pa_end = 0x480503FF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss */
static struct omap_hwmod_ocp_if omap2420_l4_core__dss = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_dss_core_hwmod,
.clk = "dss_ick",
- .addr = omap2420_dss_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_dss_addrs),
+ .addr = omap2_dss_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
@@ -1214,10 +880,9 @@ static struct omap_hwmod_opt_clk dss_opt_clks[] = {
static struct omap_hwmod omap2420_dss_core_hwmod = {
.name = "dss_core",
- .class = &omap2420_dss_hwmod_class,
+ .class = &omap2_dss_hwmod_class,
.main_clk = "dss1_fck", /* instead of dss_fck */
- .sdma_reqs = omap2420_dss_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2420_dss_sdma_chs),
+ .sdma_reqs = omap2xxx_dss_sdma_chs,
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -1237,46 +902,12 @@ static struct omap_hwmod omap2420_dss_core_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_dispc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_dispc_hwmod_class = {
- .name = "dispc",
- .sysc = &omap2420_dispc_sysc,
-};
-
-static struct omap_hwmod_irq_info omap2420_dispc_irqs[] = {
- { .irq = 25 },
-};
-
-static struct omap_hwmod_addr_space omap2420_dss_dispc_addrs[] = {
- {
- .pa_start = 0x48050400,
- .pa_end = 0x480507FF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_dispc */
static struct omap_hwmod_ocp_if omap2420_l4_core__dss_dispc = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_dss_dispc_hwmod,
.clk = "dss_ick",
- .addr = omap2420_dss_dispc_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_dss_dispc_addrs),
+ .addr = omap2_dss_dispc_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP2420_L4_CORE_FW_DSS_DISPC_REGION,
@@ -1293,9 +924,8 @@ static struct omap_hwmod_ocp_if *omap2420_dss_dispc_slaves[] = {
static struct omap_hwmod omap2420_dss_dispc_hwmod = {
.name = "dss_dispc",
- .class = &omap2420_dispc_hwmod_class,
- .mpu_irqs = omap2420_dispc_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_dispc_irqs),
+ .class = &omap2_dispc_hwmod_class,
+ .mpu_irqs = omap2_dispc_irqs,
.main_clk = "dss1_fck",
.prcm = {
.omap2 = {
@@ -1312,41 +942,12 @@ static struct omap_hwmod omap2420_dss_dispc_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'rfbi' class
- * remote frame buffer interface
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_rfbi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_rfbi_hwmod_class = {
- .name = "rfbi",
- .sysc = &omap2420_rfbi_sysc,
-};
-
-static struct omap_hwmod_addr_space omap2420_dss_rfbi_addrs[] = {
- {
- .pa_start = 0x48050800,
- .pa_end = 0x48050BFF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_rfbi */
static struct omap_hwmod_ocp_if omap2420_l4_core__dss_rfbi = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_dss_rfbi_hwmod,
.clk = "dss_ick",
- .addr = omap2420_dss_rfbi_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_dss_rfbi_addrs),
+ .addr = omap2_dss_rfbi_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
@@ -1363,7 +964,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_rfbi_slaves[] = {
static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.name = "dss_rfbi",
- .class = &omap2420_rfbi_hwmod_class,
+ .class = &omap2_rfbi_hwmod_class,
.main_clk = "dss1_fck",
.prcm = {
.omap2 = {
@@ -1378,31 +979,12 @@ static struct omap_hwmod omap2420_dss_rfbi_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'venc' class
- * video encoder
- */
-
-static struct omap_hwmod_class omap2420_venc_hwmod_class = {
- .name = "venc",
-};
-
-/* dss_venc */
-static struct omap_hwmod_addr_space omap2420_dss_venc_addrs[] = {
- {
- .pa_start = 0x48050C00,
- .pa_end = 0x48050FFF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_venc */
static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_dss_venc_hwmod,
.clk = "dss_54m_fck",
- .addr = omap2420_dss_venc_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_dss_venc_addrs),
+ .addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP2420_L4_CORE_FW_DSS_VENC_REGION,
@@ -1420,7 +1002,7 @@ static struct omap_hwmod_ocp_if *omap2420_dss_venc_slaves[] = {
static struct omap_hwmod omap2420_dss_venc_hwmod = {
.name = "dss_venc",
- .class = &omap2420_venc_hwmod_class,
+ .class = &omap2_venc_hwmod_class,
.main_clk = "dss1_fck",
.prcm = {
.omap2 = {
@@ -1453,25 +1035,14 @@ static struct omap_i2c_dev_attr i2c_dev_attr;
/* I2C1 */
-static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
- { .irq = INT_24XX_I2C1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
- { .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
-};
-
static struct omap_hwmod_ocp_if *omap2420_i2c1_slaves[] = {
&omap2420_l4_core__i2c1,
};
static struct omap_hwmod omap2420_i2c1_hwmod = {
.name = "i2c1",
- .mpu_irqs = i2c1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(i2c1_mpu_irqs),
- .sdma_reqs = i2c1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(i2c1_sdma_reqs),
+ .mpu_irqs = omap2_i2c1_mpu_irqs,
+ .sdma_reqs = omap2_i2c1_sdma_reqs,
.main_clk = "i2c1_fck",
.prcm = {
.omap2 = {
@@ -1492,25 +1063,14 @@ static struct omap_hwmod omap2420_i2c1_hwmod = {
/* I2C2 */
-static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
- { .irq = INT_24XX_I2C2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
- { .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
-};
-
static struct omap_hwmod_ocp_if *omap2420_i2c2_slaves[] = {
&omap2420_l4_core__i2c2,
};
static struct omap_hwmod omap2420_i2c2_hwmod = {
.name = "i2c2",
- .mpu_irqs = i2c2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(i2c2_mpu_irqs),
- .sdma_reqs = i2c2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(i2c2_sdma_reqs),
+ .mpu_irqs = omap2_i2c2_mpu_irqs,
+ .sdma_reqs = omap2_i2c2_sdma_reqs,
.main_clk = "i2c2_fck",
.prcm = {
.omap2 = {
@@ -1536,6 +1096,7 @@ static struct omap_hwmod_addr_space omap2420_gpio1_addr_space[] = {
.pa_end = 0x480181ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio1 = {
@@ -1543,7 +1104,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio1 = {
.slave = &omap2420_gpio1_hwmod,
.clk = "gpios_ick",
.addr = omap2420_gpio1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_gpio1_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1554,6 +1114,7 @@ static struct omap_hwmod_addr_space omap2420_gpio2_addr_space[] = {
.pa_end = 0x4801a1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio2 = {
@@ -1561,7 +1122,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio2 = {
.slave = &omap2420_gpio2_hwmod,
.clk = "gpios_ick",
.addr = omap2420_gpio2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_gpio2_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1572,6 +1132,7 @@ static struct omap_hwmod_addr_space omap2420_gpio3_addr_space[] = {
.pa_end = 0x4801c1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio3 = {
@@ -1579,7 +1140,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio3 = {
.slave = &omap2420_gpio3_hwmod,
.clk = "gpios_ick",
.addr = omap2420_gpio3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_gpio3_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1590,6 +1150,7 @@ static struct omap_hwmod_addr_space omap2420_gpio4_addr_space[] = {
.pa_end = 0x4801e1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio4 = {
@@ -1597,7 +1158,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio4 = {
.slave = &omap2420_gpio4_hwmod,
.clk = "gpios_ick",
.addr = omap2420_gpio4_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2420_gpio4_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1607,32 +1167,7 @@ static struct omap_gpio_dev_attr gpio_dev_attr = {
.dbck_flag = false,
};
-static struct omap_hwmod_class_sysconfig omap242x_gpio_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-/*
- * 'gpio' class
- * general purpose io module
- */
-static struct omap_hwmod_class omap242x_gpio_hwmod_class = {
- .name = "gpio",
- .sysc = &omap242x_gpio_sysc,
- .rev = 0,
-};
-
/* gpio1 */
-static struct omap_hwmod_irq_info omap242x_gpio1_irqs[] = {
- { .irq = 29 }, /* INT_24XX_GPIO_BANK1 */
-};
-
static struct omap_hwmod_ocp_if *omap2420_gpio1_slaves[] = {
&omap2420_l4_wkup__gpio1,
};
@@ -1640,8 +1175,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio1_slaves[] = {
static struct omap_hwmod omap2420_gpio1_hwmod = {
.name = "gpio1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap242x_gpio1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio1_irqs),
+ .mpu_irqs = omap2_gpio1_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
@@ -1654,16 +1188,12 @@ static struct omap_hwmod omap2420_gpio1_hwmod = {
},
.slaves = omap2420_gpio1_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_gpio1_slaves),
- .class = &omap242x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
/* gpio2 */
-static struct omap_hwmod_irq_info omap242x_gpio2_irqs[] = {
- { .irq = 30 }, /* INT_24XX_GPIO_BANK2 */
-};
-
static struct omap_hwmod_ocp_if *omap2420_gpio2_slaves[] = {
&omap2420_l4_wkup__gpio2,
};
@@ -1671,8 +1201,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio2_slaves[] = {
static struct omap_hwmod omap2420_gpio2_hwmod = {
.name = "gpio2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap242x_gpio2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio2_irqs),
+ .mpu_irqs = omap2_gpio2_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
@@ -1685,16 +1214,12 @@ static struct omap_hwmod omap2420_gpio2_hwmod = {
},
.slaves = omap2420_gpio2_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_gpio2_slaves),
- .class = &omap242x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
/* gpio3 */
-static struct omap_hwmod_irq_info omap242x_gpio3_irqs[] = {
- { .irq = 31 }, /* INT_24XX_GPIO_BANK3 */
-};
-
static struct omap_hwmod_ocp_if *omap2420_gpio3_slaves[] = {
&omap2420_l4_wkup__gpio3,
};
@@ -1702,8 +1227,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio3_slaves[] = {
static struct omap_hwmod omap2420_gpio3_hwmod = {
.name = "gpio3",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap242x_gpio3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio3_irqs),
+ .mpu_irqs = omap2_gpio3_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
@@ -1716,16 +1240,12 @@ static struct omap_hwmod omap2420_gpio3_hwmod = {
},
.slaves = omap2420_gpio3_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_gpio3_slaves),
- .class = &omap242x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
/* gpio4 */
-static struct omap_hwmod_irq_info omap242x_gpio4_irqs[] = {
- { .irq = 32 }, /* INT_24XX_GPIO_BANK4 */
-};
-
static struct omap_hwmod_ocp_if *omap2420_gpio4_slaves[] = {
&omap2420_l4_wkup__gpio4,
};
@@ -1733,8 +1253,7 @@ static struct omap_hwmod_ocp_if *omap2420_gpio4_slaves[] = {
static struct omap_hwmod omap2420_gpio4_hwmod = {
.name = "gpio4",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap242x_gpio4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap242x_gpio4_irqs),
+ .mpu_irqs = omap2_gpio4_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
@@ -1747,28 +1266,11 @@ static struct omap_hwmod omap2420_gpio4_hwmod = {
},
.slaves = omap2420_gpio4_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_gpio4_slaves),
- .class = &omap242x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
-/* system dma */
-static struct omap_hwmod_class_sysconfig omap2420_dma_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x002c,
- .syss_offs = 0x0028,
- .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_dma_hwmod_class = {
- .name = "dma",
- .sysc = &omap2420_dma_sysc,
-};
-
/* dma attributes */
static struct omap_dma_dev_attr dma_dev_attr = {
.dev_caps = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
@@ -1776,21 +1278,6 @@ static struct omap_dma_dev_attr dma_dev_attr = {
.lch_count = 32,
};
-static struct omap_hwmod_irq_info omap2420_dma_system_irqs[] = {
- { .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
- { .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
- { .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
- { .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
-};
-
-static struct omap_hwmod_addr_space omap2420_dma_system_addrs[] = {
- {
- .pa_start = 0x48056000,
- .pa_end = 0x48056fff,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* dma_system -> L3 */
static struct omap_hwmod_ocp_if omap2420_dma_system__l3 = {
.master = &omap2420_dma_system_hwmod,
@@ -1809,8 +1296,7 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__dma_system = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_dma_system_hwmod,
.clk = "sdma_ick",
- .addr = omap2420_dma_system_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_dma_system_addrs),
+ .addr = omap2_dma_system_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1821,9 +1307,8 @@ static struct omap_hwmod_ocp_if *omap2420_dma_system_slaves[] = {
static struct omap_hwmod omap2420_dma_system_hwmod = {
.name = "dma",
- .class = &omap2420_dma_hwmod_class,
- .mpu_irqs = omap2420_dma_system_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_dma_system_irqs),
+ .class = &omap2xxx_dma_hwmod_class,
+ .mpu_irqs = omap2_dma_system_irqs,
.main_clk = "core_l3_ck",
.slaves = omap2420_dma_system_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_dma_system_slaves),
@@ -1834,48 +1319,19 @@ static struct omap_hwmod omap2420_dma_system_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'mailbox' class
- * mailbox module allowing communication between the on-chip processors
- * using a queued mailbox-interrupt mechanism.
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_mailbox_sysc = {
- .rev_offs = 0x000,
- .sysc_offs = 0x010,
- .syss_offs = 0x014,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_mailbox_hwmod_class = {
- .name = "mailbox",
- .sysc = &omap2420_mailbox_sysc,
-};
-
/* mailbox */
static struct omap_hwmod omap2420_mailbox_hwmod;
static struct omap_hwmod_irq_info omap2420_mailbox_irqs[] = {
{ .name = "dsp", .irq = 26 },
{ .name = "iva", .irq = 34 },
-};
-
-static struct omap_hwmod_addr_space omap2420_mailbox_addrs[] = {
- {
- .pa_start = 0x48094000,
- .pa_end = 0x480941ff,
- .flags = ADDR_TYPE_RT,
- },
+ { .irq = -1 }
};
/* l4_core -> mailbox */
static struct omap_hwmod_ocp_if omap2420_l4_core__mailbox = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_mailbox_hwmod,
- .addr = omap2420_mailbox_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_mailbox_addrs),
+ .addr = omap2_mailbox_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1886,9 +1342,8 @@ static struct omap_hwmod_ocp_if *omap2420_mailbox_slaves[] = {
static struct omap_hwmod omap2420_mailbox_hwmod = {
.name = "mailbox",
- .class = &omap2420_mailbox_hwmod_class,
+ .class = &omap2xxx_mailbox_hwmod_class,
.mpu_irqs = omap2420_mailbox_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mailbox_irqs),
.main_clk = "mailboxes_ick",
.prcm = {
.omap2 = {
@@ -1904,45 +1359,7 @@ static struct omap_hwmod omap2420_mailbox_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
-/*
- * 'mcspi' class
- * multichannel serial port interface (mcspi) / master/slave synchronous serial
- * bus
- */
-
-static struct omap_hwmod_class_sysconfig omap2420_mcspi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2420_mcspi_class = {
- .name = "mcspi",
- .sysc = &omap2420_mcspi_sysc,
- .rev = OMAP2_MCSPI_REV,
-};
-
/* mcspi1 */
-static struct omap_hwmod_irq_info omap2420_mcspi1_mpu_irqs[] = {
- { .irq = 65 },
-};
-
-static struct omap_hwmod_dma_info omap2420_mcspi1_sdma_reqs[] = {
- { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
- { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
- { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
- { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
- { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
- { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
- { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
- { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
-};
-
static struct omap_hwmod_ocp_if *omap2420_mcspi1_slaves[] = {
&omap2420_l4_core__mcspi1,
};
@@ -1953,10 +1370,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
static struct omap_hwmod omap2420_mcspi1_hwmod = {
.name = "mcspi1_hwmod",
- .mpu_irqs = omap2420_mcspi1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcspi1_mpu_irqs),
- .sdma_reqs = omap2420_mcspi1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcspi1_sdma_reqs),
+ .mpu_irqs = omap2_mcspi1_mpu_irqs,
+ .sdma_reqs = omap2_mcspi1_sdma_reqs,
.main_clk = "mcspi1_fck",
.prcm = {
.omap2 = {
@@ -1969,23 +1384,12 @@ static struct omap_hwmod omap2420_mcspi1_hwmod = {
},
.slaves = omap2420_mcspi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_mcspi1_slaves),
- .class = &omap2420_mcspi_class,
- .dev_attr = &omap_mcspi1_dev_attr,
+ .class = &omap2xxx_mcspi_class,
+ .dev_attr = &omap_mcspi1_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
/* mcspi2 */
-static struct omap_hwmod_irq_info omap2420_mcspi2_mpu_irqs[] = {
- { .irq = 66 },
-};
-
-static struct omap_hwmod_dma_info omap2420_mcspi2_sdma_reqs[] = {
- { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
- { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
- { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
- { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
-};
-
static struct omap_hwmod_ocp_if *omap2420_mcspi2_slaves[] = {
&omap2420_l4_core__mcspi2,
};
@@ -1996,10 +1400,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
static struct omap_hwmod omap2420_mcspi2_hwmod = {
.name = "mcspi2_hwmod",
- .mpu_irqs = omap2420_mcspi2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcspi2_mpu_irqs),
- .sdma_reqs = omap2420_mcspi2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcspi2_sdma_reqs),
+ .mpu_irqs = omap2_mcspi2_mpu_irqs,
+ .sdma_reqs = omap2_mcspi2_sdma_reqs,
.main_clk = "mcspi2_fck",
.prcm = {
.omap2 = {
@@ -2012,8 +1414,8 @@ static struct omap_hwmod omap2420_mcspi2_hwmod = {
},
.slaves = omap2420_mcspi2_slaves,
.slaves_cnt = ARRAY_SIZE(omap2420_mcspi2_slaves),
- .class = &omap2420_mcspi_class,
- .dev_attr = &omap_mcspi2_dev_attr,
+ .class = &omap2xxx_mcspi_class,
+ .dev_attr = &omap_mcspi2_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
};
@@ -2030,20 +1432,7 @@ static struct omap_hwmod_class omap2420_mcbsp_hwmod_class = {
static struct omap_hwmod_irq_info omap2420_mcbsp1_irqs[] = {
{ .name = "tx", .irq = 59 },
{ .name = "rx", .irq = 60 },
-};
-
-static struct omap_hwmod_dma_info omap2420_mcbsp1_sdma_chs[] = {
- { .name = "rx", .dma_req = 32 },
- { .name = "tx", .dma_req = 31 },
-};
-
-static struct omap_hwmod_addr_space omap2420_mcbsp1_addrs[] = {
- {
- .name = "mpu",
- .pa_start = 0x48074000,
- .pa_end = 0x480740ff,
- .flags = ADDR_TYPE_RT
- },
+ { .irq = -1 }
};
/* l4_core -> mcbsp1 */
@@ -2051,8 +1440,7 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__mcbsp1 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_mcbsp1_hwmod,
.clk = "mcbsp1_ick",
- .addr = omap2420_mcbsp1_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_mcbsp1_addrs),
+ .addr = omap2_mcbsp1_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2065,9 +1453,7 @@ static struct omap_hwmod omap2420_mcbsp1_hwmod = {
.name = "mcbsp1",
.class = &omap2420_mcbsp_hwmod_class,
.mpu_irqs = omap2420_mcbsp1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcbsp1_irqs),
- .sdma_reqs = omap2420_mcbsp1_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcbsp1_sdma_chs),
+ .sdma_reqs = omap2_mcbsp1_sdma_reqs,
.main_clk = "mcbsp1_fck",
.prcm = {
.omap2 = {
@@ -2087,20 +1473,7 @@ static struct omap_hwmod omap2420_mcbsp1_hwmod = {
static struct omap_hwmod_irq_info omap2420_mcbsp2_irqs[] = {
{ .name = "tx", .irq = 62 },
{ .name = "rx", .irq = 63 },
-};
-
-static struct omap_hwmod_dma_info omap2420_mcbsp2_sdma_chs[] = {
- { .name = "rx", .dma_req = 34 },
- { .name = "tx", .dma_req = 33 },
-};
-
-static struct omap_hwmod_addr_space omap2420_mcbsp2_addrs[] = {
- {
- .name = "mpu",
- .pa_start = 0x48076000,
- .pa_end = 0x480760ff,
- .flags = ADDR_TYPE_RT
- },
+ { .irq = -1 }
};
/* l4_core -> mcbsp2 */
@@ -2108,8 +1481,7 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__mcbsp2 = {
.master = &omap2420_l4_core_hwmod,
.slave = &omap2420_mcbsp2_hwmod,
.clk = "mcbsp2_ick",
- .addr = omap2420_mcbsp2_addrs,
- .addr_cnt = ARRAY_SIZE(omap2420_mcbsp2_addrs),
+ .addr = omap2xxx_mcbsp2_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2122,9 +1494,7 @@ static struct omap_hwmod omap2420_mcbsp2_hwmod = {
.name = "mcbsp2",
.class = &omap2420_mcbsp_hwmod_class,
.mpu_irqs = omap2420_mcbsp2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2420_mcbsp2_irqs),
- .sdma_reqs = omap2420_mcbsp2_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2420_mcbsp2_sdma_chs),
+ .sdma_reqs = omap2_mcbsp2_sdma_reqs,
.main_clk = "mcbsp2_fck",
.prcm = {
.omap2 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 9682dd519f8d..2a52f025bd06 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -1,7 +1,7 @@
/*
* omap_hwmod_2430_data.c - hardware modules present on the OMAP2430 chips
*
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009-2011 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
@@ -131,42 +131,21 @@ static struct omap_hwmod_ocp_if omap2430_usbhsotg__l3 = {
.user = OCP_USER_MPU,
};
-/* I2C IP block address space length (in bytes) */
-#define OMAP2_I2C_AS_LEN 128
-
/* L4 CORE -> I2C1 interface */
-static struct omap_hwmod_addr_space omap2430_i2c1_addr_space[] = {
- {
- .pa_start = 0x48070000,
- .pa_end = 0x48070000 + OMAP2_I2C_AS_LEN - 1,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2430_l4_core__i2c1 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_i2c1_hwmod,
.clk = "i2c1_ick",
- .addr = omap2430_i2c1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_i2c1_addr_space),
+ .addr = omap2_i2c1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> I2C2 interface */
-static struct omap_hwmod_addr_space omap2430_i2c2_addr_space[] = {
- {
- .pa_start = 0x48072000,
- .pa_end = 0x48072000 + OMAP2_I2C_AS_LEN - 1,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2430_l4_core__i2c2 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_i2c2_hwmod,
.clk = "i2c2_ick",
- .addr = omap2430_i2c2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_i2c2_addr_space),
+ .addr = omap2_i2c2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -178,56 +157,29 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__l4_wkup = {
};
/* L4 CORE -> UART1 interface */
-static struct omap_hwmod_addr_space omap2430_uart1_addr_space[] = {
- {
- .pa_start = OMAP2_UART1_BASE,
- .pa_end = OMAP2_UART1_BASE + SZ_8K - 1,
- .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2_l4_core__uart1 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_uart1_hwmod,
.clk = "uart1_ick",
- .addr = omap2430_uart1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_uart1_addr_space),
+ .addr = omap2xxx_uart1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> UART2 interface */
-static struct omap_hwmod_addr_space omap2430_uart2_addr_space[] = {
- {
- .pa_start = OMAP2_UART2_BASE,
- .pa_end = OMAP2_UART2_BASE + SZ_1K - 1,
- .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2_l4_core__uart2 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_uart2_hwmod,
.clk = "uart2_ick",
- .addr = omap2430_uart2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_uart2_addr_space),
+ .addr = omap2xxx_uart2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 PER -> UART3 interface */
-static struct omap_hwmod_addr_space omap2430_uart3_addr_space[] = {
- {
- .pa_start = OMAP2_UART3_BASE,
- .pa_end = OMAP2_UART3_BASE + SZ_1K - 1,
- .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2_l4_core__uart3 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_uart3_hwmod,
.clk = "uart3_ick",
- .addr = omap2430_uart3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_uart3_addr_space),
+ .addr = omap2xxx_uart3_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -248,7 +200,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__usbhsotg = {
.slave = &omap2430_usbhsotg_hwmod,
.clk = "usb_l4_ick",
.addr = omap2430_usbhsotg_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_usbhsotg_addrs),
.user = OCP_USER_MPU,
};
@@ -261,38 +212,20 @@ static struct omap_hwmod_ocp_if *omap2430_usbhsotg_slaves[] = {
};
/* L4 CORE -> MMC1 interface */
-static struct omap_hwmod_addr_space omap2430_mmc1_addr_space[] = {
- {
- .pa_start = 0x4809c000,
- .pa_end = 0x4809c1ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2430_l4_core__mmc1 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_mmc1_hwmod,
.clk = "mmchs1_ick",
.addr = omap2430_mmc1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_mmc1_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* L4 CORE -> MMC2 interface */
-static struct omap_hwmod_addr_space omap2430_mmc2_addr_space[] = {
- {
- .pa_start = 0x480b4000,
- .pa_end = 0x480b41ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2430_l4_core__mmc2 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_mmc2_hwmod,
- .addr = omap2430_mmc2_addr_space,
.clk = "mmchs2_ick",
- .addr_cnt = ARRAY_SIZE(omap2430_mmc2_addr_space),
+ .addr = omap2430_mmc2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -333,56 +266,29 @@ static struct omap_hwmod_ocp_if *omap2430_l4_wkup_masters[] = {
};
/* l4 core -> mcspi1 interface */
-static struct omap_hwmod_addr_space omap2430_mcspi1_addr_space[] = {
- {
- .pa_start = 0x48098000,
- .pa_end = 0x480980ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi1 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_mcspi1_hwmod,
.clk = "mcspi1_ick",
- .addr = omap2430_mcspi1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_mcspi1_addr_space),
+ .addr = omap2_mcspi1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4 core -> mcspi2 interface */
-static struct omap_hwmod_addr_space omap2430_mcspi2_addr_space[] = {
- {
- .pa_start = 0x4809a000,
- .pa_end = 0x4809a0ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi2 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_mcspi2_hwmod,
.clk = "mcspi2_ick",
- .addr = omap2430_mcspi2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_mcspi2_addr_space),
+ .addr = omap2_mcspi2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4 core -> mcspi3 interface */
-static struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[] = {
- {
- .pa_start = 0x480b8000,
- .pa_end = 0x480b80ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap2430_l4_core__mcspi3 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_mcspi3_hwmod,
.clk = "mcspi3_ick",
.addr = omap2430_mcspi3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_mcspi3_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -441,29 +347,8 @@ static struct omap_hwmod omap2430_iva_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
-/* Timer Common */
-static struct omap_hwmod_class_sysconfig omap2430_timer_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_timer_hwmod_class = {
- .name = "timer",
- .sysc = &omap2430_timer_sysc,
- .rev = OMAP_TIMER_IP_VERSION_1,
-};
-
/* timer1 */
static struct omap_hwmod omap2430_timer1_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer1_mpu_irqs[] = {
- { .irq = 37, },
-};
static struct omap_hwmod_addr_space omap2430_timer1_addrs[] = {
{
@@ -471,6 +356,7 @@ static struct omap_hwmod_addr_space omap2430_timer1_addrs[] = {
.pa_end = 0x49018000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_wkup -> timer1 */
@@ -479,7 +365,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__timer1 = {
.slave = &omap2430_timer1_hwmod,
.clk = "gpt1_ick",
.addr = omap2430_timer1_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -491,8 +376,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer1_slaves[] = {
/* timer1 hwmod */
static struct omap_hwmod omap2430_timer1_hwmod = {
.name = "timer1",
- .mpu_irqs = omap2430_timer1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer1_mpu_irqs),
+ .mpu_irqs = omap2_timer1_mpu_irqs,
.main_clk = "gpt1_fck",
.prcm = {
.omap2 = {
@@ -505,31 +389,19 @@ static struct omap_hwmod omap2430_timer1_hwmod = {
},
.slaves = omap2430_timer1_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer1_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer2 */
static struct omap_hwmod omap2430_timer2_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer2_mpu_irqs[] = {
- { .irq = 38, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer2_addrs[] = {
- {
- .pa_start = 0x4802a000,
- .pa_end = 0x4802a000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer2 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer2 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer2_hwmod,
.clk = "gpt2_ick",
- .addr = omap2430_timer2_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer2_addrs),
+ .addr = omap2xxx_timer2_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -541,8 +413,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer2_slaves[] = {
/* timer2 hwmod */
static struct omap_hwmod omap2430_timer2_hwmod = {
.name = "timer2",
- .mpu_irqs = omap2430_timer2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer2_mpu_irqs),
+ .mpu_irqs = omap2_timer2_mpu_irqs,
.main_clk = "gpt2_fck",
.prcm = {
.omap2 = {
@@ -555,31 +426,19 @@ static struct omap_hwmod omap2430_timer2_hwmod = {
},
.slaves = omap2430_timer2_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer2_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer3 */
static struct omap_hwmod omap2430_timer3_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer3_mpu_irqs[] = {
- { .irq = 39, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer3_addrs[] = {
- {
- .pa_start = 0x48078000,
- .pa_end = 0x48078000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer3 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer3 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer3_hwmod,
.clk = "gpt3_ick",
- .addr = omap2430_timer3_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer3_addrs),
+ .addr = omap2xxx_timer3_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -591,8 +450,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer3_slaves[] = {
/* timer3 hwmod */
static struct omap_hwmod omap2430_timer3_hwmod = {
.name = "timer3",
- .mpu_irqs = omap2430_timer3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer3_mpu_irqs),
+ .mpu_irqs = omap2_timer3_mpu_irqs,
.main_clk = "gpt3_fck",
.prcm = {
.omap2 = {
@@ -605,31 +463,19 @@ static struct omap_hwmod omap2430_timer3_hwmod = {
},
.slaves = omap2430_timer3_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer3_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer4 */
static struct omap_hwmod omap2430_timer4_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer4_mpu_irqs[] = {
- { .irq = 40, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer4_addrs[] = {
- {
- .pa_start = 0x4807a000,
- .pa_end = 0x4807a000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer4 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer4 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer4_hwmod,
.clk = "gpt4_ick",
- .addr = omap2430_timer4_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer4_addrs),
+ .addr = omap2xxx_timer4_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -641,8 +487,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer4_slaves[] = {
/* timer4 hwmod */
static struct omap_hwmod omap2430_timer4_hwmod = {
.name = "timer4",
- .mpu_irqs = omap2430_timer4_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer4_mpu_irqs),
+ .mpu_irqs = omap2_timer4_mpu_irqs,
.main_clk = "gpt4_fck",
.prcm = {
.omap2 = {
@@ -655,31 +500,19 @@ static struct omap_hwmod omap2430_timer4_hwmod = {
},
.slaves = omap2430_timer4_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer4_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer5 */
static struct omap_hwmod omap2430_timer5_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer5_mpu_irqs[] = {
- { .irq = 41, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer5_addrs[] = {
- {
- .pa_start = 0x4807c000,
- .pa_end = 0x4807c000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer5 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer5 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer5_hwmod,
.clk = "gpt5_ick",
- .addr = omap2430_timer5_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer5_addrs),
+ .addr = omap2xxx_timer5_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -691,8 +524,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer5_slaves[] = {
/* timer5 hwmod */
static struct omap_hwmod omap2430_timer5_hwmod = {
.name = "timer5",
- .mpu_irqs = omap2430_timer5_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer5_mpu_irqs),
+ .mpu_irqs = omap2_timer5_mpu_irqs,
.main_clk = "gpt5_fck",
.prcm = {
.omap2 = {
@@ -705,31 +537,19 @@ static struct omap_hwmod omap2430_timer5_hwmod = {
},
.slaves = omap2430_timer5_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer5_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer6 */
static struct omap_hwmod omap2430_timer6_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer6_mpu_irqs[] = {
- { .irq = 42, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer6_addrs[] = {
- {
- .pa_start = 0x4807e000,
- .pa_end = 0x4807e000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer6 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer6 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer6_hwmod,
.clk = "gpt6_ick",
- .addr = omap2430_timer6_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer6_addrs),
+ .addr = omap2xxx_timer6_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -741,8 +561,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer6_slaves[] = {
/* timer6 hwmod */
static struct omap_hwmod omap2430_timer6_hwmod = {
.name = "timer6",
- .mpu_irqs = omap2430_timer6_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer6_mpu_irqs),
+ .mpu_irqs = omap2_timer6_mpu_irqs,
.main_clk = "gpt6_fck",
.prcm = {
.omap2 = {
@@ -755,31 +574,19 @@ static struct omap_hwmod omap2430_timer6_hwmod = {
},
.slaves = omap2430_timer6_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer6_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer7 */
static struct omap_hwmod omap2430_timer7_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer7_mpu_irqs[] = {
- { .irq = 43, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer7_addrs[] = {
- {
- .pa_start = 0x48080000,
- .pa_end = 0x48080000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer7 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer7 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer7_hwmod,
.clk = "gpt7_ick",
- .addr = omap2430_timer7_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer7_addrs),
+ .addr = omap2xxx_timer7_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -791,8 +598,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer7_slaves[] = {
/* timer7 hwmod */
static struct omap_hwmod omap2430_timer7_hwmod = {
.name = "timer7",
- .mpu_irqs = omap2430_timer7_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer7_mpu_irqs),
+ .mpu_irqs = omap2_timer7_mpu_irqs,
.main_clk = "gpt7_fck",
.prcm = {
.omap2 = {
@@ -805,31 +611,19 @@ static struct omap_hwmod omap2430_timer7_hwmod = {
},
.slaves = omap2430_timer7_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer7_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer8 */
static struct omap_hwmod omap2430_timer8_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer8_mpu_irqs[] = {
- { .irq = 44, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer8_addrs[] = {
- {
- .pa_start = 0x48082000,
- .pa_end = 0x48082000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer8 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer8 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer8_hwmod,
.clk = "gpt8_ick",
- .addr = omap2430_timer8_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer8_addrs),
+ .addr = omap2xxx_timer8_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -841,8 +635,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer8_slaves[] = {
/* timer8 hwmod */
static struct omap_hwmod omap2430_timer8_hwmod = {
.name = "timer8",
- .mpu_irqs = omap2430_timer8_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer8_mpu_irqs),
+ .mpu_irqs = omap2_timer8_mpu_irqs,
.main_clk = "gpt8_fck",
.prcm = {
.omap2 = {
@@ -855,31 +648,19 @@ static struct omap_hwmod omap2430_timer8_hwmod = {
},
.slaves = omap2430_timer8_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer8_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer9 */
static struct omap_hwmod omap2430_timer9_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer9_mpu_irqs[] = {
- { .irq = 45, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer9_addrs[] = {
- {
- .pa_start = 0x48084000,
- .pa_end = 0x48084000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer9 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer9 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer9_hwmod,
.clk = "gpt9_ick",
- .addr = omap2430_timer9_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer9_addrs),
+ .addr = omap2xxx_timer9_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -891,8 +672,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer9_slaves[] = {
/* timer9 hwmod */
static struct omap_hwmod omap2430_timer9_hwmod = {
.name = "timer9",
- .mpu_irqs = omap2430_timer9_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer9_mpu_irqs),
+ .mpu_irqs = omap2_timer9_mpu_irqs,
.main_clk = "gpt9_fck",
.prcm = {
.omap2 = {
@@ -905,31 +685,19 @@ static struct omap_hwmod omap2430_timer9_hwmod = {
},
.slaves = omap2430_timer9_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer9_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer10 */
static struct omap_hwmod omap2430_timer10_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer10_mpu_irqs[] = {
- { .irq = 46, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer10_addrs[] = {
- {
- .pa_start = 0x48086000,
- .pa_end = 0x48086000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer10 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer10 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer10_hwmod,
.clk = "gpt10_ick",
- .addr = omap2430_timer10_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer10_addrs),
+ .addr = omap2_timer10_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -941,8 +709,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer10_slaves[] = {
/* timer10 hwmod */
static struct omap_hwmod omap2430_timer10_hwmod = {
.name = "timer10",
- .mpu_irqs = omap2430_timer10_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer10_mpu_irqs),
+ .mpu_irqs = omap2_timer10_mpu_irqs,
.main_clk = "gpt10_fck",
.prcm = {
.omap2 = {
@@ -955,31 +722,19 @@ static struct omap_hwmod omap2430_timer10_hwmod = {
},
.slaves = omap2430_timer10_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer10_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer11 */
static struct omap_hwmod omap2430_timer11_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer11_mpu_irqs[] = {
- { .irq = 47, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer11_addrs[] = {
- {
- .pa_start = 0x48088000,
- .pa_end = 0x48088000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer11 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer11 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer11_hwmod,
.clk = "gpt11_ick",
- .addr = omap2430_timer11_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer11_addrs),
+ .addr = omap2_timer11_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -991,8 +746,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer11_slaves[] = {
/* timer11 hwmod */
static struct omap_hwmod omap2430_timer11_hwmod = {
.name = "timer11",
- .mpu_irqs = omap2430_timer11_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer11_mpu_irqs),
+ .mpu_irqs = omap2_timer11_mpu_irqs,
.main_clk = "gpt11_fck",
.prcm = {
.omap2 = {
@@ -1005,31 +759,19 @@ static struct omap_hwmod omap2430_timer11_hwmod = {
},
.slaves = omap2430_timer11_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer11_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
/* timer12 */
static struct omap_hwmod omap2430_timer12_hwmod;
-static struct omap_hwmod_irq_info omap2430_timer12_mpu_irqs[] = {
- { .irq = 48, },
-};
-
-static struct omap_hwmod_addr_space omap2430_timer12_addrs[] = {
- {
- .pa_start = 0x4808a000,
- .pa_end = 0x4808a000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer12 */
static struct omap_hwmod_ocp_if omap2430_l4_core__timer12 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_timer12_hwmod,
.clk = "gpt12_ick",
- .addr = omap2430_timer12_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_timer12_addrs),
+ .addr = omap2xxx_timer12_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1041,8 +783,7 @@ static struct omap_hwmod_ocp_if *omap2430_timer12_slaves[] = {
/* timer12 hwmod */
static struct omap_hwmod omap2430_timer12_hwmod = {
.name = "timer12",
- .mpu_irqs = omap2430_timer12_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_timer12_mpu_irqs),
+ .mpu_irqs = omap2xxx_timer12_mpu_irqs,
.main_clk = "gpt12_fck",
.prcm = {
.omap2 = {
@@ -1055,7 +796,7 @@ static struct omap_hwmod omap2430_timer12_hwmod = {
},
.slaves = omap2430_timer12_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_timer12_slaves),
- .class = &omap2430_timer_hwmod_class,
+ .class = &omap2xxx_timer_hwmod_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
};
@@ -1066,6 +807,7 @@ static struct omap_hwmod_addr_space omap2430_wd_timer2_addrs[] = {
.pa_end = 0x4901607f,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2430_l4_wkup__wd_timer2 = {
@@ -1073,31 +815,9 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__wd_timer2 = {
.slave = &omap2430_wd_timer2_hwmod,
.clk = "mpu_wdt_ick",
.addr = omap2430_wd_timer2_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_wd_timer2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/*
- * 'wd_timer' class
- * 32-bit watchdog upward counter that generates a pulse on the reset pin on
- * overflow condition
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_wd_timer_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_wd_timer_hwmod_class = {
- .name = "wd_timer",
- .sysc = &omap2430_wd_timer_sysc,
- .pre_shutdown = &omap2_wd_timer_disable
-};
-
/* wd_timer2 */
static struct omap_hwmod_ocp_if *omap2430_wd_timer2_slaves[] = {
&omap2430_l4_wkup__wd_timer2,
@@ -1105,7 +825,7 @@ static struct omap_hwmod_ocp_if *omap2430_wd_timer2_slaves[] = {
static struct omap_hwmod omap2430_wd_timer2_hwmod = {
.name = "wd_timer2",
- .class = &omap2430_wd_timer_hwmod_class,
+ .class = &omap2xxx_wd_timer_hwmod_class,
.main_clk = "mpu_wdt_fck",
.prcm = {
.omap2 = {
@@ -1121,45 +841,16 @@ static struct omap_hwmod omap2430_wd_timer2_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
-/* UART */
-
-static struct omap_hwmod_class_sysconfig uart_sysc = {
- .rev_offs = 0x50,
- .sysc_offs = 0x54,
- .syss_offs = 0x58,
- .sysc_flags = (SYSC_HAS_SIDLEMODE |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class uart_class = {
- .name = "uart",
- .sysc = &uart_sysc,
-};
-
/* UART1 */
-static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
- { .irq = INT_24XX_UART1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
-};
-
static struct omap_hwmod_ocp_if *omap2430_uart1_slaves[] = {
&omap2_l4_core__uart1,
};
static struct omap_hwmod omap2430_uart1_hwmod = {
.name = "uart1",
- .mpu_irqs = uart1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart1_mpu_irqs),
- .sdma_reqs = uart1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart1_sdma_reqs),
+ .mpu_irqs = omap2_uart1_mpu_irqs,
+ .sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
.prcm = {
.omap2 = {
@@ -1172,31 +863,20 @@ static struct omap_hwmod omap2430_uart1_hwmod = {
},
.slaves = omap2430_uart1_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_uart1_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
/* UART2 */
-static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
- { .irq = INT_24XX_UART2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
-};
-
static struct omap_hwmod_ocp_if *omap2430_uart2_slaves[] = {
&omap2_l4_core__uart2,
};
static struct omap_hwmod omap2430_uart2_hwmod = {
.name = "uart2",
- .mpu_irqs = uart2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart2_mpu_irqs),
- .sdma_reqs = uart2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart2_sdma_reqs),
+ .mpu_irqs = omap2_uart2_mpu_irqs,
+ .sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
.prcm = {
.omap2 = {
@@ -1209,31 +889,20 @@ static struct omap_hwmod omap2430_uart2_hwmod = {
},
.slaves = omap2430_uart2_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_uart2_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
/* UART3 */
-static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
- { .irq = INT_24XX_UART3_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
-};
-
static struct omap_hwmod_ocp_if *omap2430_uart3_slaves[] = {
&omap2_l4_core__uart3,
};
static struct omap_hwmod omap2430_uart3_hwmod = {
.name = "uart3",
- .mpu_irqs = uart3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart3_mpu_irqs),
- .sdma_reqs = uart3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart3_sdma_reqs),
+ .mpu_irqs = omap2_uart3_mpu_irqs,
+ .sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
.prcm = {
.omap2 = {
@@ -1246,53 +915,22 @@ static struct omap_hwmod omap2430_uart3_hwmod = {
},
.slaves = omap2430_uart3_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_uart3_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
-/*
- * 'dss' class
- * display sub-system
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_dss_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_dss_hwmod_class = {
- .name = "dss",
- .sysc = &omap2430_dss_sysc,
-};
-
-static struct omap_hwmod_dma_info omap2430_dss_sdma_chs[] = {
- { .name = "dispc", .dma_req = 5 },
-};
-
/* dss */
/* dss master ports */
static struct omap_hwmod_ocp_if *omap2430_dss_masters[] = {
&omap2430_dss__l3,
};
-static struct omap_hwmod_addr_space omap2430_dss_addrs[] = {
- {
- .pa_start = 0x48050000,
- .pa_end = 0x480503FF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss */
static struct omap_hwmod_ocp_if omap2430_l4_core__dss = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_dss_core_hwmod,
.clk = "dss_ick",
- .addr = omap2430_dss_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_dss_addrs),
+ .addr = omap2_dss_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1308,10 +946,9 @@ static struct omap_hwmod_opt_clk dss_opt_clks[] = {
static struct omap_hwmod omap2430_dss_core_hwmod = {
.name = "dss_core",
- .class = &omap2430_dss_hwmod_class,
+ .class = &omap2_dss_hwmod_class,
.main_clk = "dss1_fck", /* instead of dss_fck */
- .sdma_reqs = omap2430_dss_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_dss_sdma_chs),
+ .sdma_reqs = omap2xxx_dss_sdma_chs,
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -1331,46 +968,12 @@ static struct omap_hwmod omap2430_dss_core_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_dispc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_dispc_hwmod_class = {
- .name = "dispc",
- .sysc = &omap2430_dispc_sysc,
-};
-
-static struct omap_hwmod_irq_info omap2430_dispc_irqs[] = {
- { .irq = 25 },
-};
-
-static struct omap_hwmod_addr_space omap2430_dss_dispc_addrs[] = {
- {
- .pa_start = 0x48050400,
- .pa_end = 0x480507FF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_dispc */
static struct omap_hwmod_ocp_if omap2430_l4_core__dss_dispc = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_dss_dispc_hwmod,
.clk = "dss_ick",
- .addr = omap2430_dss_dispc_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_dss_dispc_addrs),
+ .addr = omap2_dss_dispc_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1381,9 +984,8 @@ static struct omap_hwmod_ocp_if *omap2430_dss_dispc_slaves[] = {
static struct omap_hwmod omap2430_dss_dispc_hwmod = {
.name = "dss_dispc",
- .class = &omap2430_dispc_hwmod_class,
- .mpu_irqs = omap2430_dispc_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_dispc_irqs),
+ .class = &omap2_dispc_hwmod_class,
+ .mpu_irqs = omap2_dispc_irqs,
.main_clk = "dss1_fck",
.prcm = {
.omap2 = {
@@ -1400,41 +1002,12 @@ static struct omap_hwmod omap2430_dss_dispc_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'rfbi' class
- * remote frame buffer interface
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_rfbi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_rfbi_hwmod_class = {
- .name = "rfbi",
- .sysc = &omap2430_rfbi_sysc,
-};
-
-static struct omap_hwmod_addr_space omap2430_dss_rfbi_addrs[] = {
- {
- .pa_start = 0x48050800,
- .pa_end = 0x48050BFF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_rfbi */
static struct omap_hwmod_ocp_if omap2430_l4_core__dss_rfbi = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_dss_rfbi_hwmod,
.clk = "dss_ick",
- .addr = omap2430_dss_rfbi_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_dss_rfbi_addrs),
+ .addr = omap2_dss_rfbi_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1445,7 +1018,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_rfbi_slaves[] = {
static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.name = "dss_rfbi",
- .class = &omap2430_rfbi_hwmod_class,
+ .class = &omap2_rfbi_hwmod_class,
.main_clk = "dss1_fck",
.prcm = {
.omap2 = {
@@ -1460,31 +1033,12 @@ static struct omap_hwmod omap2430_dss_rfbi_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'venc' class
- * video encoder
- */
-
-static struct omap_hwmod_class omap2430_venc_hwmod_class = {
- .name = "venc",
-};
-
-/* dss_venc */
-static struct omap_hwmod_addr_space omap2430_dss_venc_addrs[] = {
- {
- .pa_start = 0x48050C00,
- .pa_end = 0x48050FFF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_venc */
static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_dss_venc_hwmod,
.clk = "dss_54m_fck",
- .addr = omap2430_dss_venc_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_dss_venc_addrs),
+ .addr = omap2_dss_venc_addrs,
.flags = OCPIF_SWSUP_IDLE,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1496,7 +1050,7 @@ static struct omap_hwmod_ocp_if *omap2430_dss_venc_slaves[] = {
static struct omap_hwmod omap2430_dss_venc_hwmod = {
.name = "dss_venc",
- .class = &omap2430_venc_hwmod_class,
+ .class = &omap2_venc_hwmod_class,
.main_clk = "dss1_fck",
.prcm = {
.omap2 = {
@@ -1532,25 +1086,14 @@ static struct omap_i2c_dev_attr i2c_dev_attr = {
/* I2C1 */
-static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
- { .irq = INT_24XX_I2C1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
- { .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
-};
-
static struct omap_hwmod_ocp_if *omap2430_i2c1_slaves[] = {
&omap2430_l4_core__i2c1,
};
static struct omap_hwmod omap2430_i2c1_hwmod = {
.name = "i2c1",
- .mpu_irqs = i2c1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(i2c1_mpu_irqs),
- .sdma_reqs = i2c1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(i2c1_sdma_reqs),
+ .mpu_irqs = omap2_i2c1_mpu_irqs,
+ .sdma_reqs = omap2_i2c1_sdma_reqs,
.main_clk = "i2chs1_fck",
.prcm = {
.omap2 = {
@@ -1578,25 +1121,14 @@ static struct omap_hwmod omap2430_i2c1_hwmod = {
/* I2C2 */
-static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
- { .irq = INT_24XX_I2C2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
- { .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
-};
-
static struct omap_hwmod_ocp_if *omap2430_i2c2_slaves[] = {
&omap2430_l4_core__i2c2,
};
static struct omap_hwmod omap2430_i2c2_hwmod = {
.name = "i2c2",
- .mpu_irqs = i2c2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(i2c2_mpu_irqs),
- .sdma_reqs = i2c2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(i2c2_sdma_reqs),
+ .mpu_irqs = omap2_i2c2_mpu_irqs,
+ .sdma_reqs = omap2_i2c2_sdma_reqs,
.main_clk = "i2chs2_fck",
.prcm = {
.omap2 = {
@@ -1621,6 +1153,7 @@ static struct omap_hwmod_addr_space omap2430_gpio1_addr_space[] = {
.pa_end = 0x4900C1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio1 = {
@@ -1628,7 +1161,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio1 = {
.slave = &omap2430_gpio1_hwmod,
.clk = "gpios_ick",
.addr = omap2430_gpio1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_gpio1_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1639,6 +1171,7 @@ static struct omap_hwmod_addr_space omap2430_gpio2_addr_space[] = {
.pa_end = 0x4900E1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio2 = {
@@ -1646,7 +1179,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio2 = {
.slave = &omap2430_gpio2_hwmod,
.clk = "gpios_ick",
.addr = omap2430_gpio2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_gpio2_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1657,6 +1189,7 @@ static struct omap_hwmod_addr_space omap2430_gpio3_addr_space[] = {
.pa_end = 0x490101ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio3 = {
@@ -1664,7 +1197,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio3 = {
.slave = &omap2430_gpio3_hwmod,
.clk = "gpios_ick",
.addr = omap2430_gpio3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_gpio3_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1675,6 +1207,7 @@ static struct omap_hwmod_addr_space omap2430_gpio4_addr_space[] = {
.pa_end = 0x490121ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio4 = {
@@ -1682,7 +1215,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio4 = {
.slave = &omap2430_gpio4_hwmod,
.clk = "gpios_ick",
.addr = omap2430_gpio4_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_gpio4_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1693,6 +1225,7 @@ static struct omap_hwmod_addr_space omap2430_gpio5_addr_space[] = {
.pa_end = 0x480B61ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap2430_l4_core__gpio5 = {
@@ -1700,7 +1233,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__gpio5 = {
.slave = &omap2430_gpio5_hwmod,
.clk = "gpio5_ick",
.addr = omap2430_gpio5_addr_space,
- .addr_cnt = ARRAY_SIZE(omap2430_gpio5_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1710,32 +1242,7 @@ static struct omap_gpio_dev_attr gpio_dev_attr = {
.dbck_flag = false,
};
-static struct omap_hwmod_class_sysconfig omap243x_gpio_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-/*
- * 'gpio' class
- * general purpose io module
- */
-static struct omap_hwmod_class omap243x_gpio_hwmod_class = {
- .name = "gpio",
- .sysc = &omap243x_gpio_sysc,
- .rev = 0,
-};
-
/* gpio1 */
-static struct omap_hwmod_irq_info omap243x_gpio1_irqs[] = {
- { .irq = 29 }, /* INT_24XX_GPIO_BANK1 */
-};
-
static struct omap_hwmod_ocp_if *omap2430_gpio1_slaves[] = {
&omap2430_l4_wkup__gpio1,
};
@@ -1743,8 +1250,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio1_slaves[] = {
static struct omap_hwmod omap2430_gpio1_hwmod = {
.name = "gpio1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap243x_gpio1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio1_irqs),
+ .mpu_irqs = omap2_gpio1_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
@@ -1757,16 +1263,12 @@ static struct omap_hwmod omap2430_gpio1_hwmod = {
},
.slaves = omap2430_gpio1_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_gpio1_slaves),
- .class = &omap243x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
/* gpio2 */
-static struct omap_hwmod_irq_info omap243x_gpio2_irqs[] = {
- { .irq = 30 }, /* INT_24XX_GPIO_BANK2 */
-};
-
static struct omap_hwmod_ocp_if *omap2430_gpio2_slaves[] = {
&omap2430_l4_wkup__gpio2,
};
@@ -1774,8 +1276,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio2_slaves[] = {
static struct omap_hwmod omap2430_gpio2_hwmod = {
.name = "gpio2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap243x_gpio2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio2_irqs),
+ .mpu_irqs = omap2_gpio2_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
@@ -1788,16 +1289,12 @@ static struct omap_hwmod omap2430_gpio2_hwmod = {
},
.slaves = omap2430_gpio2_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_gpio2_slaves),
- .class = &omap243x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
/* gpio3 */
-static struct omap_hwmod_irq_info omap243x_gpio3_irqs[] = {
- { .irq = 31 }, /* INT_24XX_GPIO_BANK3 */
-};
-
static struct omap_hwmod_ocp_if *omap2430_gpio3_slaves[] = {
&omap2430_l4_wkup__gpio3,
};
@@ -1805,8 +1302,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio3_slaves[] = {
static struct omap_hwmod omap2430_gpio3_hwmod = {
.name = "gpio3",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap243x_gpio3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio3_irqs),
+ .mpu_irqs = omap2_gpio3_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
@@ -1819,16 +1315,12 @@ static struct omap_hwmod omap2430_gpio3_hwmod = {
},
.slaves = omap2430_gpio3_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_gpio3_slaves),
- .class = &omap243x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
/* gpio4 */
-static struct omap_hwmod_irq_info omap243x_gpio4_irqs[] = {
- { .irq = 32 }, /* INT_24XX_GPIO_BANK4 */
-};
-
static struct omap_hwmod_ocp_if *omap2430_gpio4_slaves[] = {
&omap2430_l4_wkup__gpio4,
};
@@ -1836,8 +1328,7 @@ static struct omap_hwmod_ocp_if *omap2430_gpio4_slaves[] = {
static struct omap_hwmod omap2430_gpio4_hwmod = {
.name = "gpio4",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap243x_gpio4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio4_irqs),
+ .mpu_irqs = omap2_gpio4_irqs,
.main_clk = "gpios_fck",
.prcm = {
.omap2 = {
@@ -1850,7 +1341,7 @@ static struct omap_hwmod omap2430_gpio4_hwmod = {
},
.slaves = omap2430_gpio4_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_gpio4_slaves),
- .class = &omap243x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
@@ -1858,6 +1349,7 @@ static struct omap_hwmod omap2430_gpio4_hwmod = {
/* gpio5 */
static struct omap_hwmod_irq_info omap243x_gpio5_irqs[] = {
{ .irq = 33 }, /* INT_24XX_GPIO_BANK5 */
+ { .irq = -1 }
};
static struct omap_hwmod_ocp_if *omap2430_gpio5_slaves[] = {
@@ -1868,7 +1360,6 @@ static struct omap_hwmod omap2430_gpio5_hwmod = {
.name = "gpio5",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap243x_gpio5_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap243x_gpio5_irqs),
.main_clk = "gpio5_fck",
.prcm = {
.omap2 = {
@@ -1881,28 +1372,11 @@ static struct omap_hwmod omap2430_gpio5_hwmod = {
},
.slaves = omap2430_gpio5_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_gpio5_slaves),
- .class = &omap243x_gpio_hwmod_class,
+ .class = &omap2xxx_gpio_hwmod_class,
.dev_attr = &gpio_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
-/* dma_system */
-static struct omap_hwmod_class_sysconfig omap2430_dma_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x002c,
- .syss_offs = 0x0028,
- .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_dma_hwmod_class = {
- .name = "dma",
- .sysc = &omap2430_dma_sysc,
-};
-
/* dma attributes */
static struct omap_dma_dev_attr dma_dev_attr = {
.dev_caps = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
@@ -1910,21 +1384,6 @@ static struct omap_dma_dev_attr dma_dev_attr = {
.lch_count = 32,
};
-static struct omap_hwmod_irq_info omap2430_dma_system_irqs[] = {
- { .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
- { .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
- { .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
- { .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
-};
-
-static struct omap_hwmod_addr_space omap2430_dma_system_addrs[] = {
- {
- .pa_start = 0x48056000,
- .pa_end = 0x48056fff,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* dma_system -> L3 */
static struct omap_hwmod_ocp_if omap2430_dma_system__l3 = {
.master = &omap2430_dma_system_hwmod,
@@ -1943,8 +1402,7 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__dma_system = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_dma_system_hwmod,
.clk = "sdma_ick",
- .addr = omap2430_dma_system_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_dma_system_addrs),
+ .addr = omap2_dma_system_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1955,9 +1413,8 @@ static struct omap_hwmod_ocp_if *omap2430_dma_system_slaves[] = {
static struct omap_hwmod omap2430_dma_system_hwmod = {
.name = "dma",
- .class = &omap2430_dma_hwmod_class,
- .mpu_irqs = omap2430_dma_system_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_dma_system_irqs),
+ .class = &omap2xxx_dma_hwmod_class,
+ .mpu_irqs = omap2_dma_system_irqs,
.main_clk = "core_l3_ck",
.slaves = omap2430_dma_system_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_dma_system_slaves),
@@ -1968,47 +1425,18 @@ static struct omap_hwmod omap2430_dma_system_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'mailbox' class
- * mailbox module allowing communication between the on-chip processors
- * using a queued mailbox-interrupt mechanism.
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_mailbox_sysc = {
- .rev_offs = 0x000,
- .sysc_offs = 0x010,
- .syss_offs = 0x014,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_mailbox_hwmod_class = {
- .name = "mailbox",
- .sysc = &omap2430_mailbox_sysc,
-};
-
/* mailbox */
static struct omap_hwmod omap2430_mailbox_hwmod;
static struct omap_hwmod_irq_info omap2430_mailbox_irqs[] = {
{ .irq = 26 },
-};
-
-static struct omap_hwmod_addr_space omap2430_mailbox_addrs[] = {
- {
- .pa_start = 0x48094000,
- .pa_end = 0x480941ff,
- .flags = ADDR_TYPE_RT,
- },
+ { .irq = -1 }
};
/* l4_core -> mailbox */
static struct omap_hwmod_ocp_if omap2430_l4_core__mailbox = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_mailbox_hwmod,
- .addr = omap2430_mailbox_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_mailbox_addrs),
+ .addr = omap2_mailbox_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2019,9 +1447,8 @@ static struct omap_hwmod_ocp_if *omap2430_mailbox_slaves[] = {
static struct omap_hwmod omap2430_mailbox_hwmod = {
.name = "mailbox",
- .class = &omap2430_mailbox_hwmod_class,
+ .class = &omap2xxx_mailbox_hwmod_class,
.mpu_irqs = omap2430_mailbox_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mailbox_irqs),
.main_clk = "mailboxes_ick",
.prcm = {
.omap2 = {
@@ -2037,45 +1464,7 @@ static struct omap_hwmod omap2430_mailbox_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
-/*
- * 'mcspi' class
- * multichannel serial port interface (mcspi) / master/slave synchronous serial
- * bus
- */
-
-static struct omap_hwmod_class_sysconfig omap2430_mcspi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap2430_mcspi_class = {
- .name = "mcspi",
- .sysc = &omap2430_mcspi_sysc,
- .rev = OMAP2_MCSPI_REV,
-};
-
/* mcspi1 */
-static struct omap_hwmod_irq_info omap2430_mcspi1_mpu_irqs[] = {
- { .irq = 65 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcspi1_sdma_reqs[] = {
- { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
- { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
- { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
- { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
- { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
- { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
- { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
- { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
-};
-
static struct omap_hwmod_ocp_if *omap2430_mcspi1_slaves[] = {
&omap2430_l4_core__mcspi1,
};
@@ -2086,10 +1475,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
static struct omap_hwmod omap2430_mcspi1_hwmod = {
.name = "mcspi1_hwmod",
- .mpu_irqs = omap2430_mcspi1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi1_mpu_irqs),
- .sdma_reqs = omap2430_mcspi1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi1_sdma_reqs),
+ .mpu_irqs = omap2_mcspi1_mpu_irqs,
+ .sdma_reqs = omap2_mcspi1_sdma_reqs,
.main_clk = "mcspi1_fck",
.prcm = {
.omap2 = {
@@ -2102,23 +1489,12 @@ static struct omap_hwmod omap2430_mcspi1_hwmod = {
},
.slaves = omap2430_mcspi1_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_mcspi1_slaves),
- .class = &omap2430_mcspi_class,
- .dev_attr = &omap_mcspi1_dev_attr,
+ .class = &omap2xxx_mcspi_class,
+ .dev_attr = &omap_mcspi1_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
/* mcspi2 */
-static struct omap_hwmod_irq_info omap2430_mcspi2_mpu_irqs[] = {
- { .irq = 66 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcspi2_sdma_reqs[] = {
- { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
- { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
- { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
- { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
-};
-
static struct omap_hwmod_ocp_if *omap2430_mcspi2_slaves[] = {
&omap2430_l4_core__mcspi2,
};
@@ -2129,10 +1505,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
static struct omap_hwmod omap2430_mcspi2_hwmod = {
.name = "mcspi2_hwmod",
- .mpu_irqs = omap2430_mcspi2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi2_mpu_irqs),
- .sdma_reqs = omap2430_mcspi2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi2_sdma_reqs),
+ .mpu_irqs = omap2_mcspi2_mpu_irqs,
+ .sdma_reqs = omap2_mcspi2_sdma_reqs,
.main_clk = "mcspi2_fck",
.prcm = {
.omap2 = {
@@ -2145,14 +1519,15 @@ static struct omap_hwmod omap2430_mcspi2_hwmod = {
},
.slaves = omap2430_mcspi2_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_mcspi2_slaves),
- .class = &omap2430_mcspi_class,
- .dev_attr = &omap_mcspi2_dev_attr,
+ .class = &omap2xxx_mcspi_class,
+ .dev_attr = &omap_mcspi2_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
/* mcspi3 */
static struct omap_hwmod_irq_info omap2430_mcspi3_mpu_irqs[] = {
{ .irq = 91 },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap2430_mcspi3_sdma_reqs[] = {
@@ -2160,6 +1535,7 @@ static struct omap_hwmod_dma_info omap2430_mcspi3_sdma_reqs[] = {
{ .name = "rx0", .dma_req = 16 }, /* DMA_SPI3_RX0 */
{ .name = "tx1", .dma_req = 23 }, /* DMA_SPI3_TX1 */
{ .name = "rx1", .dma_req = 24 }, /* DMA_SPI3_RX1 */
+ { .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap2430_mcspi3_slaves[] = {
@@ -2173,9 +1549,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
static struct omap_hwmod omap2430_mcspi3_hwmod = {
.name = "mcspi3_hwmod",
.mpu_irqs = omap2430_mcspi3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcspi3_mpu_irqs),
.sdma_reqs = omap2430_mcspi3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcspi3_sdma_reqs),
.main_clk = "mcspi3_fck",
.prcm = {
.omap2 = {
@@ -2188,8 +1562,8 @@ static struct omap_hwmod omap2430_mcspi3_hwmod = {
},
.slaves = omap2430_mcspi3_slaves,
.slaves_cnt = ARRAY_SIZE(omap2430_mcspi3_slaves),
- .class = &omap2430_mcspi_class,
- .dev_attr = &omap_mcspi3_dev_attr,
+ .class = &omap2xxx_mcspi_class,
+ .dev_attr = &omap_mcspi3_dev_attr,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
};
@@ -2218,12 +1592,12 @@ static struct omap_hwmod_irq_info omap2430_usbhsotg_mpu_irqs[] = {
{ .name = "mc", .irq = 92 },
{ .name = "dma", .irq = 93 },
+ { .irq = -1 }
};
static struct omap_hwmod omap2430_usbhsotg_hwmod = {
.name = "usb_otg_hs",
.mpu_irqs = omap2430_usbhsotg_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_usbhsotg_mpu_irqs),
.main_clk = "usbhs_ick",
.prcm = {
.omap2 = {
@@ -2273,20 +1647,7 @@ static struct omap_hwmod_irq_info omap2430_mcbsp1_irqs[] = {
{ .name = "rx", .irq = 60 },
{ .name = "ovr", .irq = 61 },
{ .name = "common", .irq = 64 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcbsp1_sdma_chs[] = {
- { .name = "rx", .dma_req = 32 },
- { .name = "tx", .dma_req = 31 },
-};
-
-static struct omap_hwmod_addr_space omap2430_mcbsp1_addrs[] = {
- {
- .name = "mpu",
- .pa_start = 0x48074000,
- .pa_end = 0x480740ff,
- .flags = ADDR_TYPE_RT
- },
+ { .irq = -1 }
};
/* l4_core -> mcbsp1 */
@@ -2294,8 +1655,7 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp1 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_mcbsp1_hwmod,
.clk = "mcbsp1_ick",
- .addr = omap2430_mcbsp1_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_mcbsp1_addrs),
+ .addr = omap2_mcbsp1_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2308,9 +1668,7 @@ static struct omap_hwmod omap2430_mcbsp1_hwmod = {
.name = "mcbsp1",
.class = &omap2430_mcbsp_hwmod_class,
.mpu_irqs = omap2430_mcbsp1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp1_irqs),
- .sdma_reqs = omap2430_mcbsp1_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp1_sdma_chs),
+ .sdma_reqs = omap2_mcbsp1_sdma_reqs,
.main_clk = "mcbsp1_fck",
.prcm = {
.omap2 = {
@@ -2331,20 +1689,7 @@ static struct omap_hwmod_irq_info omap2430_mcbsp2_irqs[] = {
{ .name = "tx", .irq = 62 },
{ .name = "rx", .irq = 63 },
{ .name = "common", .irq = 16 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcbsp2_sdma_chs[] = {
- { .name = "rx", .dma_req = 34 },
- { .name = "tx", .dma_req = 33 },
-};
-
-static struct omap_hwmod_addr_space omap2430_mcbsp2_addrs[] = {
- {
- .name = "mpu",
- .pa_start = 0x48076000,
- .pa_end = 0x480760ff,
- .flags = ADDR_TYPE_RT
- },
+ { .irq = -1 }
};
/* l4_core -> mcbsp2 */
@@ -2352,8 +1697,7 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp2 = {
.master = &omap2430_l4_core_hwmod,
.slave = &omap2430_mcbsp2_hwmod,
.clk = "mcbsp2_ick",
- .addr = omap2430_mcbsp2_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_mcbsp2_addrs),
+ .addr = omap2xxx_mcbsp2_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2366,9 +1710,7 @@ static struct omap_hwmod omap2430_mcbsp2_hwmod = {
.name = "mcbsp2",
.class = &omap2430_mcbsp_hwmod_class,
.mpu_irqs = omap2430_mcbsp2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp2_irqs),
- .sdma_reqs = omap2430_mcbsp2_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp2_sdma_chs),
+ .sdma_reqs = omap2_mcbsp2_sdma_reqs,
.main_clk = "mcbsp2_fck",
.prcm = {
.omap2 = {
@@ -2389,11 +1731,7 @@ static struct omap_hwmod_irq_info omap2430_mcbsp3_irqs[] = {
{ .name = "tx", .irq = 89 },
{ .name = "rx", .irq = 90 },
{ .name = "common", .irq = 17 },
-};
-
-static struct omap_hwmod_dma_info omap2430_mcbsp3_sdma_chs[] = {
- { .name = "rx", .dma_req = 18 },
- { .name = "tx", .dma_req = 17 },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap2430_mcbsp3_addrs[] = {
@@ -2403,6 +1741,7 @@ static struct omap_hwmod_addr_space omap2430_mcbsp3_addrs[] = {
.pa_end = 0x4808C0ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> mcbsp3 */
@@ -2411,7 +1750,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp3 = {
.slave = &omap2430_mcbsp3_hwmod,
.clk = "mcbsp3_ick",
.addr = omap2430_mcbsp3_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_mcbsp3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2424,9 +1762,7 @@ static struct omap_hwmod omap2430_mcbsp3_hwmod = {
.name = "mcbsp3",
.class = &omap2430_mcbsp_hwmod_class,
.mpu_irqs = omap2430_mcbsp3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp3_irqs),
- .sdma_reqs = omap2430_mcbsp3_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp3_sdma_chs),
+ .sdma_reqs = omap2_mcbsp3_sdma_reqs,
.main_clk = "mcbsp3_fck",
.prcm = {
.omap2 = {
@@ -2447,11 +1783,13 @@ static struct omap_hwmod_irq_info omap2430_mcbsp4_irqs[] = {
{ .name = "tx", .irq = 54 },
{ .name = "rx", .irq = 55 },
{ .name = "common", .irq = 18 },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap2430_mcbsp4_sdma_chs[] = {
{ .name = "rx", .dma_req = 20 },
{ .name = "tx", .dma_req = 19 },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap2430_mcbsp4_addrs[] = {
@@ -2461,6 +1799,7 @@ static struct omap_hwmod_addr_space omap2430_mcbsp4_addrs[] = {
.pa_end = 0x4808E0ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> mcbsp4 */
@@ -2469,7 +1808,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp4 = {
.slave = &omap2430_mcbsp4_hwmod,
.clk = "mcbsp4_ick",
.addr = omap2430_mcbsp4_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_mcbsp4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2482,9 +1820,7 @@ static struct omap_hwmod omap2430_mcbsp4_hwmod = {
.name = "mcbsp4",
.class = &omap2430_mcbsp_hwmod_class,
.mpu_irqs = omap2430_mcbsp4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp4_irqs),
.sdma_reqs = omap2430_mcbsp4_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp4_sdma_chs),
.main_clk = "mcbsp4_fck",
.prcm = {
.omap2 = {
@@ -2505,11 +1841,13 @@ static struct omap_hwmod_irq_info omap2430_mcbsp5_irqs[] = {
{ .name = "tx", .irq = 81 },
{ .name = "rx", .irq = 82 },
{ .name = "common", .irq = 19 },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap2430_mcbsp5_sdma_chs[] = {
{ .name = "rx", .dma_req = 22 },
{ .name = "tx", .dma_req = 21 },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap2430_mcbsp5_addrs[] = {
@@ -2519,6 +1857,7 @@ static struct omap_hwmod_addr_space omap2430_mcbsp5_addrs[] = {
.pa_end = 0x480960ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> mcbsp5 */
@@ -2527,7 +1866,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__mcbsp5 = {
.slave = &omap2430_mcbsp5_hwmod,
.clk = "mcbsp5_ick",
.addr = omap2430_mcbsp5_addrs,
- .addr_cnt = ARRAY_SIZE(omap2430_mcbsp5_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2540,9 +1878,7 @@ static struct omap_hwmod omap2430_mcbsp5_hwmod = {
.name = "mcbsp5",
.class = &omap2430_mcbsp_hwmod_class,
.mpu_irqs = omap2430_mcbsp5_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mcbsp5_irqs),
.sdma_reqs = omap2430_mcbsp5_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mcbsp5_sdma_chs),
.main_clk = "mcbsp5_fck",
.prcm = {
.omap2 = {
@@ -2580,11 +1916,13 @@ static struct omap_hwmod_class omap2430_mmc_class = {
static struct omap_hwmod_irq_info omap2430_mmc1_mpu_irqs[] = {
{ .irq = 83 },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap2430_mmc1_sdma_reqs[] = {
{ .name = "tx", .dma_req = 61 }, /* DMA_MMC1_TX */
{ .name = "rx", .dma_req = 62 }, /* DMA_MMC1_RX */
+ { .dma_req = -1 }
};
static struct omap_hwmod_opt_clk omap2430_mmc1_opt_clks[] = {
@@ -2603,9 +1941,7 @@ static struct omap_hwmod omap2430_mmc1_hwmod = {
.name = "mmc1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2430_mmc1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mmc1_mpu_irqs),
.sdma_reqs = omap2430_mmc1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mmc1_sdma_reqs),
.opt_clks = omap2430_mmc1_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap2430_mmc1_opt_clks),
.main_clk = "mmchs1_fck",
@@ -2629,11 +1965,13 @@ static struct omap_hwmod omap2430_mmc1_hwmod = {
static struct omap_hwmod_irq_info omap2430_mmc2_mpu_irqs[] = {
{ .irq = 86 },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap2430_mmc2_sdma_reqs[] = {
{ .name = "tx", .dma_req = 47 }, /* DMA_MMC2_TX */
{ .name = "rx", .dma_req = 48 }, /* DMA_MMC2_RX */
+ { .dma_req = -1 }
};
static struct omap_hwmod_opt_clk omap2430_mmc2_opt_clks[] = {
@@ -2648,9 +1986,7 @@ static struct omap_hwmod omap2430_mmc2_hwmod = {
.name = "mmc2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap2430_mmc2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap2430_mmc2_mpu_irqs),
.sdma_reqs = omap2430_mmc2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap2430_mmc2_sdma_reqs),
.opt_clks = omap2430_mmc2_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap2430_mmc2_opt_clks),
.main_clk = "mmchs2_fck",
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c
new file mode 100644
index 000000000000..04637fabadd2
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c
@@ -0,0 +1,173 @@
+/*
+ * omap_hwmod_2xxx_3xxx_interconnect_data.c - common interconnect data, OMAP2/3
+ *
+ * Copyright (C) 2009-2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX handle crossbar/shared link difference for L3?
+ * XXX these should be marked initdata for multi-OMAP kernels
+ */
+#include <asm/sizes.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/serial.h>
+
+#include "omap_hwmod_common_data.h"
+
+struct omap_hwmod_addr_space omap2430_mmc1_addr_space[] = {
+ {
+ .pa_start = 0x4809c000,
+ .pa_end = 0x4809c1ff,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2430_mmc2_addr_space[] = {
+ {
+ .pa_start = 0x480b4000,
+ .pa_end = 0x480b41ff,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_i2c1_addr_space[] = {
+ {
+ .pa_start = 0x48070000,
+ .pa_end = 0x48070000 + SZ_128 - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_i2c2_addr_space[] = {
+ {
+ .pa_start = 0x48072000,
+ .pa_end = 0x48072000 + SZ_128 - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_dss_addrs[] = {
+ {
+ .pa_start = 0x48050000,
+ .pa_end = 0x48050000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_dss_dispc_addrs[] = {
+ {
+ .pa_start = 0x48050400,
+ .pa_end = 0x48050400 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_dss_rfbi_addrs[] = {
+ {
+ .pa_start = 0x48050800,
+ .pa_end = 0x48050800 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_dss_venc_addrs[] = {
+ {
+ .pa_start = 0x48050C00,
+ .pa_end = 0x48050C00 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_timer10_addrs[] = {
+ {
+ .pa_start = 0x48086000,
+ .pa_end = 0x48086000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_timer11_addrs[] = {
+ {
+ .pa_start = 0x48088000,
+ .pa_end = 0x48088000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer12_addrs[] = {
+ {
+ .pa_start = 0x4808a000,
+ .pa_end = 0x4808a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_mcspi1_addr_space[] = {
+ {
+ .pa_start = 0x48098000,
+ .pa_end = 0x48098000 + SZ_256 - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_mcspi2_addr_space[] = {
+ {
+ .pa_start = 0x4809a000,
+ .pa_end = 0x4809a000 + SZ_256 - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[] = {
+ {
+ .pa_start = 0x480b8000,
+ .pa_end = 0x480b8000 + SZ_256 - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_dma_system_addrs[] = {
+ {
+ .pa_start = 0x48056000,
+ .pa_end = 0x48056000 + SZ_4K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_mailbox_addrs[] = {
+ {
+ .pa_start = 0x48094000,
+ .pa_end = 0x48094000 + SZ_512 - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2_mcbsp1_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48074000,
+ .pa_end = 0x480740ff,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
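
Every array in the new interconnect-data file above ends in an empty { } sentinel instead of being paired with an .addr_cnt = ARRAY_SIZE(...) field in the consuming OCP interface, which is why the OMAP2430 and OMAP3xxx hunks elsewhere in this patch simply drop their .addr_cnt lines. A minimal, self-contained sketch of how such a terminator-counted array can be sized at runtime; the struct layout and the count_addr_spaces() helper below are simplified stand-ins for illustration, not the hwmod core's actual API:

	#include <stdio.h>

	/* Simplified stand-in for omap_hwmod_addr_space; only the fields
	 * needed to recognise the empty { } terminator are modelled. */
	struct addr_space {
		unsigned long pa_start;
		unsigned long pa_end;
	};

	/* Terminator-counted table, mirroring omap2_mcbsp1_addrs[] above:
	 * the all-zero entry marks the end, so no separate count field
	 * has to be kept in sync with the data. */
	static const struct addr_space mcbsp1_addrs[] = {
		{ .pa_start = 0x48074000, .pa_end = 0x480740ff },
		{ }	/* sentinel */
	};

	/* Walk until the all-zero sentinel instead of using ARRAY_SIZE(). */
	static unsigned int count_addr_spaces(const struct addr_space *a)
	{
		unsigned int n = 0;

		while (a && (a[n].pa_start || a[n].pa_end))
			n++;
		return n;
	}

	int main(void)
	{
		printf("mcbsp1 address spaces: %u\n",
		       count_addr_spaces(mcbsp1_addrs));
		return 0;
	}
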
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
new file mode 100644
index 000000000000..c451729d289a
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -0,0 +1,322 @@
+/*
+ * omap_hwmod_2xxx_3xxx_ipblock_data.c - common IP block data for OMAP2/3
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <plat/omap_hwmod.h>
+#include <plat/serial.h>
+#include <plat/dma.h>
+
+#include <mach/irqs.h>
+
+#include "omap_hwmod_common_data.h"
+
+/* UART */
+
+static struct omap_hwmod_class_sysconfig omap2_uart_sysc = {
+ .rev_offs = 0x50,
+ .sysc_offs = 0x54,
+ .syss_offs = 0x58,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_uart_class = {
+ .name = "uart",
+ .sysc = &omap2_uart_sysc,
+};
+
+/*
+ * 'dss' class
+ * display sub-system
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_dss_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_dss_hwmod_class = {
+ .name = "dss",
+ .sysc = &omap2_dss_sysc,
+};
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_dispc_hwmod_class = {
+ .name = "dispc",
+ .sysc = &omap2_dispc_sysc,
+};
+
+/*
+ * 'rfbi' class
+ * remote frame buffer interface
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_rfbi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_rfbi_hwmod_class = {
+ .name = "rfbi",
+ .sysc = &omap2_rfbi_sysc,
+};
+
+/*
+ * 'venc' class
+ * video encoder
+ */
+
+struct omap_hwmod_class omap2_venc_hwmod_class = {
+ .name = "venc",
+};
+
+
+/* Common DMA request line data */
+struct omap_hwmod_dma_info omap2_uart1_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_uart2_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_uart3_sdma_reqs[] = {
+ { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
+ { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_i2c1_sdma_reqs[] = {
+ { .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
+ { .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_i2c2_sdma_reqs[] = {
+ { .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
+ { .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcspi1_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
+ { .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
+ { .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
+ { .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
+ { .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
+ { .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
+ { .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
+ { .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcspi2_sdma_reqs[] = {
+ { .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
+ { .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
+ { .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
+ { .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcbsp1_sdma_reqs[] = {
+ { .name = "rx", .dma_req = 32 },
+ { .name = "tx", .dma_req = 31 },
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcbsp2_sdma_reqs[] = {
+ { .name = "rx", .dma_req = 34 },
+ { .name = "tx", .dma_req = 33 },
+ { .dma_req = -1 }
+};
+
+struct omap_hwmod_dma_info omap2_mcbsp3_sdma_reqs[] = {
+ { .name = "rx", .dma_req = 18 },
+ { .name = "tx", .dma_req = 17 },
+ { .dma_req = -1 }
+};
+
+/* Other IP block data */
+
+
+/*
+ * omap_hwmod class data
+ */
+
+struct omap_hwmod_class l3_hwmod_class = {
+ .name = "l3"
+};
+
+struct omap_hwmod_class l4_hwmod_class = {
+ .name = "l4"
+};
+
+struct omap_hwmod_class mpu_hwmod_class = {
+ .name = "mpu"
+};
+
+struct omap_hwmod_class iva_hwmod_class = {
+ .name = "iva"
+};
+
+/* Common MPU IRQ line data */
+
+struct omap_hwmod_irq_info omap2_timer1_mpu_irqs[] = {
+ { .irq = 37, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer2_mpu_irqs[] = {
+ { .irq = 38, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer3_mpu_irqs[] = {
+ { .irq = 39, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer4_mpu_irqs[] = {
+ { .irq = 40, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer5_mpu_irqs[] = {
+ { .irq = 41, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer6_mpu_irqs[] = {
+ { .irq = 42, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer7_mpu_irqs[] = {
+ { .irq = 43, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer8_mpu_irqs[] = {
+ { .irq = 44, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer9_mpu_irqs[] = {
+ { .irq = 45, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer10_mpu_irqs[] = {
+ { .irq = 46, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_timer11_mpu_irqs[] = {
+ { .irq = 47, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_uart1_mpu_irqs[] = {
+ { .irq = INT_24XX_UART1_IRQ, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_uart2_mpu_irqs[] = {
+ { .irq = INT_24XX_UART2_IRQ, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[] = {
+ { .irq = INT_24XX_UART3_IRQ, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_dispc_irqs[] = {
+ { .irq = 25 },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[] = {
+ { .irq = INT_24XX_I2C1_IRQ, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[] = {
+ { .irq = INT_24XX_I2C2_IRQ, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_gpio1_irqs[] = {
+ { .irq = 29 }, /* INT_24XX_GPIO_BANK1 */
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_gpio2_irqs[] = {
+ { .irq = 30 }, /* INT_24XX_GPIO_BANK2 */
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_gpio3_irqs[] = {
+ { .irq = 31 }, /* INT_24XX_GPIO_BANK3 */
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_gpio4_irqs[] = {
+ { .irq = 32 }, /* INT_24XX_GPIO_BANK4 */
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_dma_system_irqs[] = {
+ { .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
+ { .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
+ { .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
+ { .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[] = {
+ { .irq = 65 },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[] = {
+ { .irq = 66 },
+ { .irq = -1 }
+};
+
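
The IRQ tables in this file are shared between the OMAP2420 and OMAP2430 hwmod data (and, for the omap2_* entries, OMAP3 as well), and each one ends in { .irq = -1 } rather than carrying a separate mpu_irqs_cnt. A small sketch, using simplified stand-in structures rather than the real plat/omap_hwmod.h types, of how two SoC variants can point at one shared table and how the terminator is detected; the helper name and the IRQ value 72 are illustrative only (the patch itself uses the INT_24XX_UART1_IRQ symbol):

	#include <stdio.h>

	/* Simplified stand-ins; the real omap_hwmod and
	 * omap_hwmod_irq_info structures carry many more fields. */
	struct irq_info {
		const char *name;
		int irq;
	};

	struct hwmod {
		const char *name;
		const struct irq_info *mpu_irqs;
	};

	/* One shared table, terminated by .irq = -1. */
	static const struct irq_info uart1_mpu_irqs[] = {
		{ .irq = 72 },	/* illustrative value */
		{ .irq = -1 }
	};

	/* Two SoC variants reference the same data instead of each
	 * keeping a private copy plus an mpu_irqs_cnt field. */
	static const struct hwmod omap2430_uart1 = { "uart1", uart1_mpu_irqs };
	static const struct hwmod omap3xxx_uart1 = { "uart1", uart1_mpu_irqs };

	static unsigned int count_irqs(const struct irq_info *irqs)
	{
		unsigned int n = 0;

		while (irqs && irqs[n].irq != -1)
			n++;
		return n;
	}

	int main(void)
	{
		printf("%s: %u IRQ line(s)\n", omap2430_uart1.name,
		       count_irqs(omap2430_uart1.mpu_irqs));
		printf("%s: %u IRQ line(s)\n", omap3xxx_uart1.name,
		       count_irqs(omap3xxx_uart1.mpu_irqs));
		return 0;
	}
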
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
new file mode 100644
index 000000000000..4f3547c2a49e
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
@@ -0,0 +1,130 @@
+/*
+ * omap_hwmod_2xxx_interconnect_data.c - common interconnect data for OMAP2xxx
+ *
+ * Copyright (C) 2009-2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX handle crossbar/shared link difference for L3?
+ * XXX these should be marked initdata for multi-OMAP kernels
+ */
+#include <asm/sizes.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/serial.h>
+
+#include "omap_hwmod_common_data.h"
+
+struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART1_BASE,
+ .pa_end = OMAP2_UART1_BASE + SZ_8K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART2_BASE,
+ .pa_end = OMAP2_UART2_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_uart3_addr_space[] = {
+ {
+ .pa_start = OMAP2_UART3_BASE,
+ .pa_end = OMAP2_UART3_BASE + SZ_1K - 1,
+ .flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer2_addrs[] = {
+ {
+ .pa_start = 0x4802a000,
+ .pa_end = 0x4802a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer3_addrs[] = {
+ {
+ .pa_start = 0x48078000,
+ .pa_end = 0x48078000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer4_addrs[] = {
+ {
+ .pa_start = 0x4807a000,
+ .pa_end = 0x4807a000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer5_addrs[] = {
+ {
+ .pa_start = 0x4807c000,
+ .pa_end = 0x4807c000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer6_addrs[] = {
+ {
+ .pa_start = 0x4807e000,
+ .pa_end = 0x4807e000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer7_addrs[] = {
+ {
+ .pa_start = 0x48080000,
+ .pa_end = 0x48080000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer8_addrs[] = {
+ {
+ .pa_start = 0x48082000,
+ .pa_end = 0x48082000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_timer9_addrs[] = {
+ {
+ .pa_start = 0x48084000,
+ .pa_end = 0x48084000 + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+struct omap_hwmod_addr_space omap2xxx_mcbsp2_addrs[] = {
+ {
+ .name = "mpu",
+ .pa_start = 0x48076000,
+ .pa_end = 0x480760ff,
+ .flags = ADDR_TYPE_RT
+ },
+ { }
+};
+
+
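
All of the windows in this file express their end address as base + SZ_nK - 1, i.e. pa_end names the last usable byte rather than one past it, so the region length is pa_end - pa_start + 1. A tiny self-contained example of that convention, using the gptimer2 base address from omap2xxx_timer2_addrs[] above; SZ_1K is defined locally here only so the snippet compiles on its own:

	#include <stdio.h>

	#define SZ_1K	0x400UL

	struct addr_space {
		unsigned long pa_start;
		unsigned long pa_end;
	};

	/* Inclusive end address: base + SZ_1K - 1 describes a 1 KiB window. */
	static const struct addr_space timer2_addrs = {
		.pa_start = 0x4802a000,
		.pa_end   = 0x4802a000 + SZ_1K - 1,
	};

	int main(void)
	{
		unsigned long len = timer2_addrs.pa_end - timer2_addrs.pa_start + 1;

		printf("gptimer2 window starts at 0x%lx and is %lu bytes long\n",
		       timer2_addrs.pa_start, len);
		return 0;
	}
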
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
new file mode 100644
index 000000000000..177dee20faef
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -0,0 +1,150 @@
+/*
+ * omap_hwmod_2xxx_ipblock_data.c - common IP block data for OMAP2xxx
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <plat/omap_hwmod.h>
+#include <plat/serial.h>
+#include <plat/dma.h>
+#include <plat/dmtimer.h>
+#include <plat/mcspi.h>
+
+#include <mach/irqs.h>
+
+#include "omap_hwmod_common_data.h"
+#include "wd_timer.h"
+
+struct omap_hwmod_irq_info omap2xxx_timer12_mpu_irqs[] = {
+ { .irq = 48, },
+ { .irq = -1 }
+};
+
+struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
+ { .name = "dispc", .dma_req = 5 },
+ { .dma_req = -1 }
+};
+/* OMAP2xxx Timer Common */
+static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_timer_hwmod_class = {
+ .name = "timer",
+ .sysc = &omap2xxx_timer_sysc,
+ .rev = OMAP_TIMER_IP_VERSION_1,
+};
+
+/*
+ * 'wd_timer' class
+ * 32-bit watchdog upward counter that generates a pulse on the reset pin on
+ * overflow condition
+ */
+
+static struct omap_hwmod_class_sysconfig omap2xxx_wd_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_wd_timer_hwmod_class = {
+ .name = "wd_timer",
+ .sysc = &omap2xxx_wd_timer_sysc,
+ .pre_shutdown = &omap2_wd_timer_disable
+};
+
+/*
+ * 'gpio' class
+ * general purpose io module
+ */
+static struct omap_hwmod_class_sysconfig omap2xxx_gpio_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_gpio_hwmod_class = {
+ .name = "gpio",
+ .sysc = &omap2xxx_gpio_sysc,
+ .rev = 0,
+};
+
+/* system dma */
+static struct omap_hwmod_class_sysconfig omap2xxx_dma_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x002c,
+ .syss_offs = 0x0028,
+ .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
+ SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_dma_hwmod_class = {
+ .name = "dma",
+ .sysc = &omap2xxx_dma_sysc,
+};
+
+/*
+ * 'mailbox' class
+ * mailbox module allowing communication between the on-chip processors
+ * using a queued mailbox-interrupt mechanism.
+ */
+
+static struct omap_hwmod_class_sysconfig omap2xxx_mailbox_sysc = {
+ .rev_offs = 0x000,
+ .sysc_offs = 0x010,
+ .syss_offs = 0x014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_mailbox_hwmod_class = {
+ .name = "mailbox",
+ .sysc = &omap2xxx_mailbox_sysc,
+};
+
+/*
+ * 'mcspi' class
+ * multichannel serial port interface (mcspi) / master/slave synchronous serial
+ * bus
+ */
+
+static struct omap_hwmod_class_sysconfig omap2xxx_mcspi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2xxx_mcspi_class = {
+ .name = "mcspi",
+ .sysc = &omap2xxx_mcspi_sysc,
+ .rev = OMAP2_MCSPI_REV,
+};
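
This file moves the per-class sysconfig descriptions (timer, wd_timer, gpio, dma, mailbox, mcspi) into single omap2xxx_* copies so that the 2420 and 2430 data files keep only a pointer to them, which is what the .class = &omap2xxx_mcspi_class changes earlier in the patch rely on. A condensed sketch of that ownership structure, using simplified stand-in types rather than the real plat/omap_hwmod.h definitions:

	#include <stdio.h>

	/* Simplified stand-ins for omap_hwmod_class_sysconfig,
	 * omap_hwmod_class and omap_hwmod. */
	struct sysc_regs {
		unsigned int rev_offs, sysc_offs, syss_offs;
	};

	struct hwmod_class {
		const char *name;
		const struct sysc_regs *sysc;
	};

	struct hwmod {
		const char *name;
		const struct hwmod_class *class;
	};

	/* One register-offset description per IP family... */
	static const struct sysc_regs mcspi_sysc = {
		.rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014,
	};

	static const struct hwmod_class mcspi_class = {
		.name = "mcspi",
		.sysc = &mcspi_sysc,
	};

	/* ...and each chip's instances just point at it. */
	static const struct hwmod omap2420_mcspi1 = { "mcspi1", &mcspi_class };
	static const struct hwmod omap2430_mcspi1 = { "mcspi1", &mcspi_class };

	int main(void)
	{
		printf("%s sysc offset: 0x%04x\n", omap2420_mcspi1.class->name,
		       omap2420_mcspi1.class->sysc->sysc_offs);
		printf("%s sysc offset: 0x%04x\n", omap2430_mcspi1.class->name,
		       omap2430_mcspi1.class->sysc->sysc_offs);
		return 0;
	}
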
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 909a84de6682..1a52716e48bf 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1,7 +1,7 @@
/*
* omap_hwmod_3xxx_data.c - hardware modules present on the OMAP3xxx chips
*
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2009-2011 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
@@ -103,6 +103,7 @@ static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_per = {
static struct omap_hwmod_irq_info omap3xxx_l3_main_irqs[] = {
{ .irq = INT_34XX_L3_DBG_IRQ },
{ .irq = INT_34XX_L3_APP_IRQ },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_l3_main_addrs[] = {
@@ -111,6 +112,7 @@ static struct omap_hwmod_addr_space omap3xxx_l3_main_addrs[] = {
.pa_end = 0x6800ffff,
.flags = ADDR_TYPE_RT,
},
+ { }
};
/* MPU -> L3 interface */
@@ -118,7 +120,6 @@ static struct omap_hwmod_ocp_if omap3xxx_mpu__l3_main = {
.master = &omap3xxx_mpu_hwmod,
.slave = &omap3xxx_l3_main_hwmod,
.addr = omap3xxx_l3_main_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_l3_main_addrs),
.user = OCP_USER_MPU,
};
@@ -150,8 +151,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_l3_main_masters[] = {
static struct omap_hwmod omap3xxx_l3_main_hwmod = {
.name = "l3_main",
.class = &l3_hwmod_class,
- .mpu_irqs = omap3xxx_l3_main_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_l3_main_irqs),
+ .mpu_irqs = omap3xxx_l3_main_irqs,
.masters = omap3xxx_l3_main_masters,
.masters_cnt = ARRAY_SIZE(omap3xxx_l3_main_masters),
.slaves = omap3xxx_l3_main_slaves,
@@ -190,39 +190,21 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
};
/* L4 CORE -> MMC1 interface */
-static struct omap_hwmod_addr_space omap3xxx_mmc1_addr_space[] = {
- {
- .pa_start = 0x4809c000,
- .pa_end = 0x4809c1ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mmc1_hwmod,
.clk = "mmchs1_ick",
- .addr = omap3xxx_mmc1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mmc1_addr_space),
+ .addr = omap2430_mmc1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
.flags = OMAP_FIREWALL_L4
};
/* L4 CORE -> MMC2 interface */
-static struct omap_hwmod_addr_space omap3xxx_mmc2_addr_space[] = {
- {
- .pa_start = 0x480b4000,
- .pa_end = 0x480b41ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mmc2_hwmod,
.clk = "mmchs2_ick",
- .addr = omap3xxx_mmc2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mmc2_addr_space),
+ .addr = omap2430_mmc2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
.flags = OMAP_FIREWALL_L4
};
@@ -234,6 +216,7 @@ static struct omap_hwmod_addr_space omap3xxx_mmc3_addr_space[] = {
.pa_end = 0x480ad1ff,
.flags = ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = {
@@ -241,7 +224,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = {
.slave = &omap3xxx_mmc3_hwmod,
.clk = "mmchs3_ick",
.addr = omap3xxx_mmc3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mmc3_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
.flags = OMAP_FIREWALL_L4
};
@@ -253,6 +235,7 @@ static struct omap_hwmod_addr_space omap3xxx_uart1_addr_space[] = {
.pa_end = OMAP3_UART1_BASE + SZ_8K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = {
@@ -260,7 +243,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = {
.slave = &omap3xxx_uart1_hwmod,
.clk = "uart1_ick",
.addr = omap3xxx_uart1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_uart1_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -271,6 +253,7 @@ static struct omap_hwmod_addr_space omap3xxx_uart2_addr_space[] = {
.pa_end = OMAP3_UART2_BASE + SZ_1K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = {
@@ -278,7 +261,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = {
.slave = &omap3xxx_uart2_hwmod,
.clk = "uart2_ick",
.addr = omap3xxx_uart2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_uart2_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -289,6 +271,7 @@ static struct omap_hwmod_addr_space omap3xxx_uart3_addr_space[] = {
.pa_end = OMAP3_UART3_BASE + SZ_1K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = {
@@ -296,7 +279,6 @@ static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = {
.slave = &omap3xxx_uart3_hwmod,
.clk = "uart3_ick",
.addr = omap3xxx_uart3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_uart3_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -307,6 +289,7 @@ static struct omap_hwmod_addr_space omap3xxx_uart4_addr_space[] = {
.pa_end = OMAP3_UART4_BASE + SZ_1K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = {
@@ -314,28 +297,15 @@ static struct omap_hwmod_ocp_if omap3_l4_per__uart4 = {
.slave = &omap3xxx_uart4_hwmod,
.clk = "uart4_ick",
.addr = omap3xxx_uart4_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_uart4_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* I2C IP block address space length (in bytes) */
-#define OMAP2_I2C_AS_LEN 128
-
/* L4 CORE -> I2C1 interface */
-static struct omap_hwmod_addr_space omap3xxx_i2c1_addr_space[] = {
- {
- .pa_start = 0x48070000,
- .pa_end = 0x48070000 + OMAP2_I2C_AS_LEN - 1,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_i2c1_hwmod,
.clk = "i2c1_ick",
- .addr = omap3xxx_i2c1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_i2c1_addr_space),
+ .addr = omap2_i2c1_addr_space,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_I2C1_REGION,
@@ -347,20 +317,11 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
};
/* L4 CORE -> I2C2 interface */
-static struct omap_hwmod_addr_space omap3xxx_i2c2_addr_space[] = {
- {
- .pa_start = 0x48072000,
- .pa_end = 0x48072000 + OMAP2_I2C_AS_LEN - 1,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_i2c2_hwmod,
.clk = "i2c2_ick",
- .addr = omap3xxx_i2c2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_i2c2_addr_space),
+ .addr = omap2_i2c2_addr_space,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_I2C2_REGION,
@@ -375,9 +336,10 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = {
static struct omap_hwmod_addr_space omap3xxx_i2c3_addr_space[] = {
{
.pa_start = 0x48060000,
- .pa_end = 0x48060000 + OMAP2_I2C_AS_LEN - 1,
+ .pa_end = 0x48060000 + SZ_128 - 1,
.flags = ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
@@ -385,7 +347,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
.slave = &omap3xxx_i2c3_hwmod,
.clk = "i2c3_ick",
.addr = omap3xxx_i2c3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3xxx_i2c3_addr_space),
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_I2C3_REGION,
@@ -403,6 +364,7 @@ static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
.pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1,
.flags = ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap3_l4_core__sr1 = {
@@ -410,7 +372,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__sr1 = {
.slave = &omap34xx_sr1_hwmod,
.clk = "sr_l4_ick",
.addr = omap3_sr1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3_sr1_addr_space),
.user = OCP_USER_MPU,
};
@@ -421,6 +382,7 @@ static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
.pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1,
.flags = ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = {
@@ -428,7 +390,6 @@ static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = {
.slave = &omap34xx_sr2_hwmod,
.clk = "sr_l4_ick",
.addr = omap3_sr2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap3_sr2_addr_space),
.user = OCP_USER_MPU,
};
@@ -442,6 +403,7 @@ static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = {
.pa_end = OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> usbhsotg */
@@ -450,7 +412,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = {
.slave = &omap3xxx_usbhsotg_hwmod,
.clk = "l4_ick",
.addr = omap3xxx_usbhsotg_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_addrs),
.user = OCP_USER_MPU,
};
@@ -468,6 +429,7 @@ static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = {
.pa_end = AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> usbhsotg */
@@ -476,7 +438,6 @@ static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = {
.slave = &am35xx_usbhsotg_hwmod,
.clk = "l4_ick",
.addr = am35xx_usbhsotg_addrs,
- .addr_cnt = ARRAY_SIZE(am35xx_usbhsotg_addrs),
.user = OCP_USER_MPU,
};
@@ -611,9 +572,6 @@ static struct omap_hwmod_class omap3xxx_timer_hwmod_class = {
/* timer1 */
static struct omap_hwmod omap3xxx_timer1_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer1_mpu_irqs[] = {
- { .irq = 37, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = {
{
@@ -621,6 +579,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = {
.pa_end = 0x48318000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_wkup -> timer1 */
@@ -629,7 +588,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__timer1 = {
.slave = &omap3xxx_timer1_hwmod,
.clk = "gpt1_ick",
.addr = omap3xxx_timer1_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -641,8 +599,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer1_slaves[] = {
/* timer1 hwmod */
static struct omap_hwmod omap3xxx_timer1_hwmod = {
.name = "timer1",
- .mpu_irqs = omap3xxx_timer1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer1_mpu_irqs),
+ .mpu_irqs = omap2_timer1_mpu_irqs,
.main_clk = "gpt1_fck",
.prcm = {
.omap2 = {
@@ -661,9 +618,6 @@ static struct omap_hwmod omap3xxx_timer1_hwmod = {
/* timer2 */
static struct omap_hwmod omap3xxx_timer2_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer2_mpu_irqs[] = {
- { .irq = 38, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = {
{
@@ -671,6 +625,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = {
.pa_end = 0x49032000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer2 */
@@ -679,7 +634,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer2 = {
.slave = &omap3xxx_timer2_hwmod,
.clk = "gpt2_ick",
.addr = omap3xxx_timer2_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -691,8 +645,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer2_slaves[] = {
/* timer2 hwmod */
static struct omap_hwmod omap3xxx_timer2_hwmod = {
.name = "timer2",
- .mpu_irqs = omap3xxx_timer2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer2_mpu_irqs),
+ .mpu_irqs = omap2_timer2_mpu_irqs,
.main_clk = "gpt2_fck",
.prcm = {
.omap2 = {
@@ -711,9 +664,6 @@ static struct omap_hwmod omap3xxx_timer2_hwmod = {
/* timer3 */
static struct omap_hwmod omap3xxx_timer3_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer3_mpu_irqs[] = {
- { .irq = 39, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = {
{
@@ -721,6 +671,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = {
.pa_end = 0x49034000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer3 */
@@ -729,7 +680,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = {
.slave = &omap3xxx_timer3_hwmod,
.clk = "gpt3_ick",
.addr = omap3xxx_timer3_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -741,8 +691,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer3_slaves[] = {
/* timer3 hwmod */
static struct omap_hwmod omap3xxx_timer3_hwmod = {
.name = "timer3",
- .mpu_irqs = omap3xxx_timer3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer3_mpu_irqs),
+ .mpu_irqs = omap2_timer3_mpu_irqs,
.main_clk = "gpt3_fck",
.prcm = {
.omap2 = {
@@ -761,9 +710,6 @@ static struct omap_hwmod omap3xxx_timer3_hwmod = {
/* timer4 */
static struct omap_hwmod omap3xxx_timer4_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer4_mpu_irqs[] = {
- { .irq = 40, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = {
{
@@ -771,6 +717,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = {
.pa_end = 0x49036000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer4 */
@@ -779,7 +726,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer4 = {
.slave = &omap3xxx_timer4_hwmod,
.clk = "gpt4_ick",
.addr = omap3xxx_timer4_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -791,8 +737,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer4_slaves[] = {
/* timer4 hwmod */
static struct omap_hwmod omap3xxx_timer4_hwmod = {
.name = "timer4",
- .mpu_irqs = omap3xxx_timer4_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer4_mpu_irqs),
+ .mpu_irqs = omap2_timer4_mpu_irqs,
.main_clk = "gpt4_fck",
.prcm = {
.omap2 = {
@@ -811,9 +756,6 @@ static struct omap_hwmod omap3xxx_timer4_hwmod = {
/* timer5 */
static struct omap_hwmod omap3xxx_timer5_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer5_mpu_irqs[] = {
- { .irq = 41, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = {
{
@@ -821,6 +763,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = {
.pa_end = 0x49038000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer5 */
@@ -829,7 +772,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer5 = {
.slave = &omap3xxx_timer5_hwmod,
.clk = "gpt5_ick",
.addr = omap3xxx_timer5_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer5_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -841,8 +783,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer5_slaves[] = {
/* timer5 hwmod */
static struct omap_hwmod omap3xxx_timer5_hwmod = {
.name = "timer5",
- .mpu_irqs = omap3xxx_timer5_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer5_mpu_irqs),
+ .mpu_irqs = omap2_timer5_mpu_irqs,
.main_clk = "gpt5_fck",
.prcm = {
.omap2 = {
@@ -861,9 +802,6 @@ static struct omap_hwmod omap3xxx_timer5_hwmod = {
/* timer6 */
static struct omap_hwmod omap3xxx_timer6_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer6_mpu_irqs[] = {
- { .irq = 42, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = {
{
@@ -871,6 +809,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = {
.pa_end = 0x4903A000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer6 */
@@ -879,7 +818,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer6 = {
.slave = &omap3xxx_timer6_hwmod,
.clk = "gpt6_ick",
.addr = omap3xxx_timer6_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer6_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -891,8 +829,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer6_slaves[] = {
/* timer6 hwmod */
static struct omap_hwmod omap3xxx_timer6_hwmod = {
.name = "timer6",
- .mpu_irqs = omap3xxx_timer6_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer6_mpu_irqs),
+ .mpu_irqs = omap2_timer6_mpu_irqs,
.main_clk = "gpt6_fck",
.prcm = {
.omap2 = {
@@ -911,9 +848,6 @@ static struct omap_hwmod omap3xxx_timer6_hwmod = {
/* timer7 */
static struct omap_hwmod omap3xxx_timer7_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer7_mpu_irqs[] = {
- { .irq = 43, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = {
{
@@ -921,6 +855,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = {
.pa_end = 0x4903C000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer7 */
@@ -929,7 +864,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer7 = {
.slave = &omap3xxx_timer7_hwmod,
.clk = "gpt7_ick",
.addr = omap3xxx_timer7_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer7_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -941,8 +875,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer7_slaves[] = {
/* timer7 hwmod */
static struct omap_hwmod omap3xxx_timer7_hwmod = {
.name = "timer7",
- .mpu_irqs = omap3xxx_timer7_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer7_mpu_irqs),
+ .mpu_irqs = omap2_timer7_mpu_irqs,
.main_clk = "gpt7_fck",
.prcm = {
.omap2 = {
@@ -961,9 +894,6 @@ static struct omap_hwmod omap3xxx_timer7_hwmod = {
/* timer8 */
static struct omap_hwmod omap3xxx_timer8_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer8_mpu_irqs[] = {
- { .irq = 44, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = {
{
@@ -971,6 +901,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = {
.pa_end = 0x4903E000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer8 */
@@ -979,7 +910,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer8 = {
.slave = &omap3xxx_timer8_hwmod,
.clk = "gpt8_ick",
.addr = omap3xxx_timer8_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer8_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -991,8 +921,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer8_slaves[] = {
/* timer8 hwmod */
static struct omap_hwmod omap3xxx_timer8_hwmod = {
.name = "timer8",
- .mpu_irqs = omap3xxx_timer8_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer8_mpu_irqs),
+ .mpu_irqs = omap2_timer8_mpu_irqs,
.main_clk = "gpt8_fck",
.prcm = {
.omap2 = {
@@ -1011,9 +940,6 @@ static struct omap_hwmod omap3xxx_timer8_hwmod = {
/* timer9 */
static struct omap_hwmod omap3xxx_timer9_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer9_mpu_irqs[] = {
- { .irq = 45, },
-};
static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = {
{
@@ -1021,6 +947,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = {
.pa_end = 0x49040000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer9 */
@@ -1029,7 +956,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer9 = {
.slave = &omap3xxx_timer9_hwmod,
.clk = "gpt9_ick",
.addr = omap3xxx_timer9_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer9_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1041,8 +967,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer9_slaves[] = {
/* timer9 hwmod */
static struct omap_hwmod omap3xxx_timer9_hwmod = {
.name = "timer9",
- .mpu_irqs = omap3xxx_timer9_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer9_mpu_irqs),
+ .mpu_irqs = omap2_timer9_mpu_irqs,
.main_clk = "gpt9_fck",
.prcm = {
.omap2 = {
@@ -1061,25 +986,13 @@ static struct omap_hwmod omap3xxx_timer9_hwmod = {
/* timer10 */
static struct omap_hwmod omap3xxx_timer10_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer10_mpu_irqs[] = {
- { .irq = 46, },
-};
-
-static struct omap_hwmod_addr_space omap3xxx_timer10_addrs[] = {
- {
- .pa_start = 0x48086000,
- .pa_end = 0x48086000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer10 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer10 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_timer10_hwmod,
.clk = "gpt10_ick",
- .addr = omap3xxx_timer10_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer10_addrs),
+ .addr = omap2_timer10_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1091,8 +1004,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer10_slaves[] = {
/* timer10 hwmod */
static struct omap_hwmod omap3xxx_timer10_hwmod = {
.name = "timer10",
- .mpu_irqs = omap3xxx_timer10_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer10_mpu_irqs),
+ .mpu_irqs = omap2_timer10_mpu_irqs,
.main_clk = "gpt10_fck",
.prcm = {
.omap2 = {
@@ -1111,25 +1023,13 @@ static struct omap_hwmod omap3xxx_timer10_hwmod = {
/* timer11 */
static struct omap_hwmod omap3xxx_timer11_hwmod;
-static struct omap_hwmod_irq_info omap3xxx_timer11_mpu_irqs[] = {
- { .irq = 47, },
-};
-
-static struct omap_hwmod_addr_space omap3xxx_timer11_addrs[] = {
- {
- .pa_start = 0x48088000,
- .pa_end = 0x48088000 + SZ_1K - 1,
- .flags = ADDR_TYPE_RT
- },
-};
/* l4_core -> timer11 */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_timer11_hwmod,
.clk = "gpt11_ick",
- .addr = omap3xxx_timer11_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer11_addrs),
+ .addr = omap2_timer11_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1141,8 +1041,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer11_slaves[] = {
/* timer11 hwmod */
static struct omap_hwmod omap3xxx_timer11_hwmod = {
.name = "timer11",
- .mpu_irqs = omap3xxx_timer11_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer11_mpu_irqs),
+ .mpu_irqs = omap2_timer11_mpu_irqs,
.main_clk = "gpt11_fck",
.prcm = {
.omap2 = {
@@ -1163,6 +1062,7 @@ static struct omap_hwmod omap3xxx_timer11_hwmod = {
static struct omap_hwmod omap3xxx_timer12_hwmod;
static struct omap_hwmod_irq_info omap3xxx_timer12_mpu_irqs[] = {
{ .irq = 95, },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = {
@@ -1171,6 +1071,7 @@ static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = {
.pa_end = 0x48304000 + SZ_1K - 1,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> timer12 */
@@ -1179,7 +1080,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer12 = {
.slave = &omap3xxx_timer12_hwmod,
.clk = "gpt12_ick",
.addr = omap3xxx_timer12_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_timer12_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1192,7 +1092,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_timer12_slaves[] = {
static struct omap_hwmod omap3xxx_timer12_hwmod = {
.name = "timer12",
.mpu_irqs = omap3xxx_timer12_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_timer12_mpu_irqs),
.main_clk = "gpt12_fck",
.prcm = {
.omap2 = {
@@ -1216,6 +1115,7 @@ static struct omap_hwmod_addr_space omap3xxx_wd_timer2_addrs[] = {
.pa_end = 0x4831407f,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
@@ -1223,7 +1123,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
.slave = &omap3xxx_wd_timer2_hwmod,
.clk = "wdt2_ick",
.addr = omap3xxx_wd_timer2_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_wd_timer2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1291,45 +1190,16 @@ static struct omap_hwmod omap3xxx_wd_timer2_hwmod = {
.flags = HWMOD_SWSUP_SIDLE,
};
-/* UART common */
-
-static struct omap_hwmod_class_sysconfig uart_sysc = {
- .rev_offs = 0x50,
- .sysc_offs = 0x54,
- .syss_offs = 0x58,
- .sysc_flags = (SYSC_HAS_SIDLEMODE |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class uart_class = {
- .name = "uart",
- .sysc = &uart_sysc,
-};
-
/* UART1 */
-static struct omap_hwmod_irq_info uart1_mpu_irqs[] = {
- { .irq = INT_24XX_UART1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart1_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART1_TX, },
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART1_RX, },
-};
-
static struct omap_hwmod_ocp_if *omap3xxx_uart1_slaves[] = {
&omap3_l4_core__uart1,
};
static struct omap_hwmod omap3xxx_uart1_hwmod = {
.name = "uart1",
- .mpu_irqs = uart1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart1_mpu_irqs),
- .sdma_reqs = uart1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart1_sdma_reqs),
+ .mpu_irqs = omap2_uart1_mpu_irqs,
+ .sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
.prcm = {
.omap2 = {
@@ -1342,31 +1212,20 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
},
.slaves = omap3xxx_uart1_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_uart1_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};
/* UART2 */
-static struct omap_hwmod_irq_info uart2_mpu_irqs[] = {
- { .irq = INT_24XX_UART2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart2_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART2_TX, },
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART2_RX, },
-};
-
static struct omap_hwmod_ocp_if *omap3xxx_uart2_slaves[] = {
&omap3_l4_core__uart2,
};
static struct omap_hwmod omap3xxx_uart2_hwmod = {
.name = "uart2",
- .mpu_irqs = uart2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart2_mpu_irqs),
- .sdma_reqs = uart2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart2_sdma_reqs),
+ .mpu_irqs = omap2_uart2_mpu_irqs,
+ .sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
.prcm = {
.omap2 = {
@@ -1379,31 +1238,20 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
},
.slaves = omap3xxx_uart2_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_uart2_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};
/* UART3 */
-static struct omap_hwmod_irq_info uart3_mpu_irqs[] = {
- { .irq = INT_24XX_UART3_IRQ, },
-};
-
-static struct omap_hwmod_dma_info uart3_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_UART3_TX, },
- { .name = "rx", .dma_req = OMAP24XX_DMA_UART3_RX, },
-};
-
static struct omap_hwmod_ocp_if *omap3xxx_uart3_slaves[] = {
&omap3_l4_per__uart3,
};
static struct omap_hwmod omap3xxx_uart3_hwmod = {
.name = "uart3",
- .mpu_irqs = uart3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart3_mpu_irqs),
- .sdma_reqs = uart3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart3_sdma_reqs),
+ .mpu_irqs = omap2_uart3_mpu_irqs,
+ .sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
.prcm = {
.omap2 = {
@@ -1416,7 +1264,7 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
},
.slaves = omap3xxx_uart3_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_uart3_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};
@@ -1424,11 +1272,13 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
static struct omap_hwmod_irq_info uart4_mpu_irqs[] = {
{ .irq = INT_36XX_UART4_IRQ, },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info uart4_sdma_reqs[] = {
{ .name = "rx", .dma_req = OMAP36XX_DMA_UART4_RX, },
{ .name = "tx", .dma_req = OMAP36XX_DMA_UART4_TX, },
+ { .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap3xxx_uart4_slaves[] = {
@@ -1438,9 +1288,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_uart4_slaves[] = {
static struct omap_hwmod omap3xxx_uart4_hwmod = {
.name = "uart4",
.mpu_irqs = uart4_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(uart4_mpu_irqs),
.sdma_reqs = uart4_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(uart4_sdma_reqs),
.main_clk = "uart4_fck",
.prcm = {
.omap2 = {
@@ -1453,7 +1301,7 @@ static struct omap_hwmod omap3xxx_uart4_hwmod = {
},
.slaves = omap3xxx_uart4_slaves,
.slaves_cnt = ARRAY_SIZE(omap3xxx_uart4_slaves),
- .class = &uart_class,
+ .class = &omap2_uart_class,
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
};
@@ -1462,27 +1310,10 @@ static struct omap_hwmod_class i2c_class = {
.sysc = &i2c_sysc,
};
-/*
- * 'dss' class
- * display sub-system
- */
-
-static struct omap_hwmod_class_sysconfig omap3xxx_dss_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap3xxx_dss_hwmod_class = {
- .name = "dss",
- .sysc = &omap3xxx_dss_sysc,
-};
-
static struct omap_hwmod_dma_info omap3xxx_dss_sdma_chs[] = {
{ .name = "dispc", .dma_req = 5 },
{ .name = "dsi1", .dma_req = 74 },
+ { .dma_req = -1 }
};
/* dss */
@@ -1491,21 +1322,12 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_masters[] = {
&omap3xxx_dss__l3,
};
-static struct omap_hwmod_addr_space omap3xxx_dss_addrs[] = {
- {
- .pa_start = 0x48050000,
- .pa_end = 0x480503FF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss */
static struct omap_hwmod_ocp_if omap3430es1_l4_core__dss = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3430es1_dss_core_hwmod,
.clk = "dss_ick",
- .addr = omap3xxx_dss_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_dss_addrs),
+ .addr = omap2_dss_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION,
@@ -1520,8 +1342,7 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_core_hwmod,
.clk = "dss_ick",
- .addr = omap3xxx_dss_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_dss_addrs),
+ .addr = omap2_dss_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_CORE_REGION,
@@ -1549,11 +1370,9 @@ static struct omap_hwmod_opt_clk dss_opt_clks[] = {
static struct omap_hwmod omap3430es1_dss_core_hwmod = {
.name = "dss_core",
- .class = &omap3xxx_dss_hwmod_class,
+ .class = &omap2_dss_hwmod_class,
.main_clk = "dss1_alwon_fck", /* instead of dss_fck */
.sdma_reqs = omap3xxx_dss_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_dss_sdma_chs),
-
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -1575,11 +1394,9 @@ static struct omap_hwmod omap3430es1_dss_core_hwmod = {
static struct omap_hwmod omap3xxx_dss_core_hwmod = {
.name = "dss_core",
- .class = &omap3xxx_dss_hwmod_class,
+ .class = &omap2_dss_hwmod_class,
.main_clk = "dss1_alwon_fck", /* instead of dss_fck */
.sdma_reqs = omap3xxx_dss_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_dss_sdma_chs),
-
.prcm = {
.omap2 = {
.prcm_reg_id = 1,
@@ -1600,47 +1417,12 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = {
CHIP_IS_OMAP3630ES1 | CHIP_GE_OMAP3630ES1_1),
};
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap3xxx_dispc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_MIDLEMODE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap3xxx_dispc_hwmod_class = {
- .name = "dispc",
- .sysc = &omap3xxx_dispc_sysc,
-};
-
-static struct omap_hwmod_irq_info omap3xxx_dispc_irqs[] = {
- { .irq = 25 },
-};
-
-static struct omap_hwmod_addr_space omap3xxx_dss_dispc_addrs[] = {
- {
- .pa_start = 0x48050400,
- .pa_end = 0x480507FF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_dispc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_dispc_hwmod,
.clk = "dss_ick",
- .addr = omap3xxx_dss_dispc_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_dss_dispc_addrs),
+ .addr = omap2_dss_dispc_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_DISPC_REGION,
@@ -1658,9 +1440,8 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
.name = "dss_dispc",
- .class = &omap3xxx_dispc_hwmod_class,
- .mpu_irqs = omap3xxx_dispc_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_dispc_irqs),
+ .class = &omap2_dispc_hwmod_class,
+ .mpu_irqs = omap2_dispc_irqs,
.main_clk = "dss1_alwon_fck",
.prcm = {
.omap2 = {
@@ -1688,6 +1469,7 @@ static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
{ .irq = 25 },
+ { .irq = -1 }
};
/* dss_dsi1 */
@@ -1697,6 +1479,7 @@ static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
.pa_end = 0x4804FFFF,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> dss_dsi1 */
@@ -1704,7 +1487,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_dsi1_hwmod,
.addr = omap3xxx_dss_dsi1_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_dss_dsi1_addrs),
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_DSI_REGION,
@@ -1724,7 +1506,6 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.name = "dss_dsi1",
.class = &omap3xxx_dsi_hwmod_class,
.mpu_irqs = omap3xxx_dsi1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_dsi1_irqs),
.main_clk = "dss1_alwon_fck",
.prcm = {
.omap2 = {
@@ -1741,41 +1522,12 @@ static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'rfbi' class
- * remote frame buffer interface
- */
-
-static struct omap_hwmod_class_sysconfig omap3xxx_rfbi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap3xxx_rfbi_hwmod_class = {
- .name = "rfbi",
- .sysc = &omap3xxx_rfbi_sysc,
-};
-
-static struct omap_hwmod_addr_space omap3xxx_dss_rfbi_addrs[] = {
- {
- .pa_start = 0x48050800,
- .pa_end = 0x48050BFF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_rfbi */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_rfbi = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_rfbi_hwmod,
.clk = "dss_ick",
- .addr = omap3xxx_dss_rfbi_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_dss_rfbi_addrs),
+ .addr = omap2_dss_rfbi_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_RFBI_REGION,
@@ -1793,7 +1545,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_rfbi_slaves[] = {
static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
- .class = &omap3xxx_rfbi_hwmod_class,
+ .class = &omap2_rfbi_hwmod_class,
.main_clk = "dss1_alwon_fck",
.prcm = {
.omap2 = {
@@ -1810,31 +1562,12 @@ static struct omap_hwmod omap3xxx_dss_rfbi_hwmod = {
.flags = HWMOD_NO_IDLEST,
};
-/*
- * 'venc' class
- * video encoder
- */
-
-static struct omap_hwmod_class omap3xxx_venc_hwmod_class = {
- .name = "venc",
-};
-
-/* dss_venc */
-static struct omap_hwmod_addr_space omap3xxx_dss_venc_addrs[] = {
- {
- .pa_start = 0x48050C00,
- .pa_end = 0x48050FFF,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l4_core -> dss_venc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_dss_venc_hwmod,
.clk = "dss_tv_fck",
- .addr = omap3xxx_dss_venc_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_dss_venc_addrs),
+ .addr = omap2_dss_venc_addrs,
.fw = {
.omap2 = {
.l4_fw_region = OMAP3_L4_CORE_FW_DSS_VENC_REGION,
@@ -1853,7 +1586,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_venc_slaves[] = {
static struct omap_hwmod omap3xxx_dss_venc_hwmod = {
.name = "dss_venc",
- .class = &omap3xxx_venc_hwmod_class,
+ .class = &omap2_venc_hwmod_class,
.main_clk = "dss1_alwon_fck",
.prcm = {
.omap2 = {
@@ -1876,25 +1609,14 @@ static struct omap_i2c_dev_attr i2c1_dev_attr = {
.fifo_depth = 8, /* bytes */
};
-static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
- { .irq = INT_24XX_I2C1_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
- { .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
-};
-
static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = {
&omap3_l4_core__i2c1,
};
static struct omap_hwmod omap3xxx_i2c1_hwmod = {
.name = "i2c1",
- .mpu_irqs = i2c1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(i2c1_mpu_irqs),
- .sdma_reqs = i2c1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(i2c1_sdma_reqs),
+ .mpu_irqs = omap2_i2c1_mpu_irqs,
+ .sdma_reqs = omap2_i2c1_sdma_reqs,
.main_clk = "i2c1_fck",
.prcm = {
.omap2 = {
@@ -1918,25 +1640,14 @@ static struct omap_i2c_dev_attr i2c2_dev_attr = {
.fifo_depth = 8, /* bytes */
};
-static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
- { .irq = INT_24XX_I2C2_IRQ, },
-};
-
-static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
- { .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
- { .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
-};
-
static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = {
&omap3_l4_core__i2c2,
};
static struct omap_hwmod omap3xxx_i2c2_hwmod = {
.name = "i2c2",
- .mpu_irqs = i2c2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(i2c2_mpu_irqs),
- .sdma_reqs = i2c2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(i2c2_sdma_reqs),
+ .mpu_irqs = omap2_i2c2_mpu_irqs,
+ .sdma_reqs = omap2_i2c2_sdma_reqs,
.main_clk = "i2c2_fck",
.prcm = {
.omap2 = {
@@ -1962,11 +1673,13 @@ static struct omap_i2c_dev_attr i2c3_dev_attr = {
static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
{ .irq = INT_34XX_I2C3_IRQ, },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info i2c3_sdma_reqs[] = {
{ .name = "tx", .dma_req = OMAP34XX_DMA_I2C3_TX },
{ .name = "rx", .dma_req = OMAP34XX_DMA_I2C3_RX },
+ { .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
@@ -1976,9 +1689,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
static struct omap_hwmod omap3xxx_i2c3_hwmod = {
.name = "i2c3",
.mpu_irqs = i2c3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(i2c3_mpu_irqs),
.sdma_reqs = i2c3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(i2c3_sdma_reqs),
.main_clk = "i2c3_fck",
.prcm = {
.omap2 = {
@@ -2003,13 +1714,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio1_addrs[] = {
.pa_end = 0x483101ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__gpio1 = {
.master = &omap3xxx_l4_wkup_hwmod,
.slave = &omap3xxx_gpio1_hwmod,
.addr = omap3xxx_gpio1_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_gpio1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2020,13 +1731,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio2_addrs[] = {
.pa_end = 0x490501ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio2 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio2_hwmod,
.addr = omap3xxx_gpio2_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_gpio2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2037,13 +1748,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio3_addrs[] = {
.pa_end = 0x490521ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio3 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio3_hwmod,
.addr = omap3xxx_gpio3_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_gpio3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2054,13 +1765,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio4_addrs[] = {
.pa_end = 0x490541ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio4 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio4_hwmod,
.addr = omap3xxx_gpio4_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_gpio4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2071,13 +1782,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio5_addrs[] = {
.pa_end = 0x490561ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio5 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio5_hwmod,
.addr = omap3xxx_gpio5_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_gpio5_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2088,13 +1799,13 @@ static struct omap_hwmod_addr_space omap3xxx_gpio6_addrs[] = {
.pa_end = 0x490581ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = {
.master = &omap3xxx_l4_per_hwmod,
.slave = &omap3xxx_gpio6_hwmod,
.addr = omap3xxx_gpio6_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_gpio6_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2127,10 +1838,6 @@ static struct omap_gpio_dev_attr gpio_dev_attr = {
};
/* gpio1 */
-static struct omap_hwmod_irq_info omap3xxx_gpio1_irqs[] = {
- { .irq = 29 }, /* INT_34XX_GPIO_BANK1 */
-};
-
static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio1_dbck", },
};
@@ -2142,8 +1849,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = {
static struct omap_hwmod omap3xxx_gpio1_hwmod = {
.name = "gpio1",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap3xxx_gpio1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio1_irqs),
+ .mpu_irqs = omap2_gpio1_irqs,
.main_clk = "gpio1_ick",
.opt_clks = gpio1_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
@@ -2164,10 +1870,6 @@ static struct omap_hwmod omap3xxx_gpio1_hwmod = {
};
/* gpio2 */
-static struct omap_hwmod_irq_info omap3xxx_gpio2_irqs[] = {
- { .irq = 30 }, /* INT_34XX_GPIO_BANK2 */
-};
-
static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio2_dbck", },
};
@@ -2179,8 +1881,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio2_slaves[] = {
static struct omap_hwmod omap3xxx_gpio2_hwmod = {
.name = "gpio2",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap3xxx_gpio2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio2_irqs),
+ .mpu_irqs = omap2_gpio2_irqs,
.main_clk = "gpio2_ick",
.opt_clks = gpio2_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks),
@@ -2201,10 +1902,6 @@ static struct omap_hwmod omap3xxx_gpio2_hwmod = {
};
/* gpio3 */
-static struct omap_hwmod_irq_info omap3xxx_gpio3_irqs[] = {
- { .irq = 31 }, /* INT_34XX_GPIO_BANK3 */
-};
-
static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio3_dbck", },
};
@@ -2216,8 +1913,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = {
static struct omap_hwmod omap3xxx_gpio3_hwmod = {
.name = "gpio3",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap3xxx_gpio3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio3_irqs),
+ .mpu_irqs = omap2_gpio3_irqs,
.main_clk = "gpio3_ick",
.opt_clks = gpio3_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
@@ -2238,10 +1934,6 @@ static struct omap_hwmod omap3xxx_gpio3_hwmod = {
};
/* gpio4 */
-static struct omap_hwmod_irq_info omap3xxx_gpio4_irqs[] = {
- { .irq = 32 }, /* INT_34XX_GPIO_BANK4 */
-};
-
static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
{ .role = "dbclk", .clk = "gpio4_dbck", },
};
@@ -2253,8 +1945,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = {
static struct omap_hwmod omap3xxx_gpio4_hwmod = {
.name = "gpio4",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .mpu_irqs = omap3xxx_gpio4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio4_irqs),
+ .mpu_irqs = omap2_gpio4_irqs,
.main_clk = "gpio4_ick",
.opt_clks = gpio4_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks),
@@ -2277,6 +1968,7 @@ static struct omap_hwmod omap3xxx_gpio4_hwmod = {
/* gpio5 */
static struct omap_hwmod_irq_info omap3xxx_gpio5_irqs[] = {
{ .irq = 33 }, /* INT_34XX_GPIO_BANK5 */
+ { .irq = -1 }
};
static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
@@ -2291,7 +1983,6 @@ static struct omap_hwmod omap3xxx_gpio5_hwmod = {
.name = "gpio5",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio5_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio5_irqs),
.main_clk = "gpio5_ick",
.opt_clks = gpio5_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks),
@@ -2314,6 +2005,7 @@ static struct omap_hwmod omap3xxx_gpio5_hwmod = {
/* gpio6 */
static struct omap_hwmod_irq_info omap3xxx_gpio6_irqs[] = {
{ .irq = 34 }, /* INT_34XX_GPIO_BANK6 */
+ { .irq = -1 }
};
static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
@@ -2328,7 +2020,6 @@ static struct omap_hwmod omap3xxx_gpio6_hwmod = {
.name = "gpio6",
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap3xxx_gpio6_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_gpio6_irqs),
.main_clk = "gpio6_ick",
.opt_clks = gpio6_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks),
@@ -2382,19 +2073,13 @@ static struct omap_hwmod_class omap3xxx_dma_hwmod_class = {
};
/* dma_system */
-static struct omap_hwmod_irq_info omap3xxx_dma_system_irqs[] = {
- { .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
- { .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
- { .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
- { .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
-};
-
static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = {
{
.pa_start = 0x48056000,
.pa_end = 0x48056fff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* dma_system master ports */
@@ -2408,7 +2093,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dma_system = {
.slave = &omap3xxx_dma_system_hwmod,
.clk = "core_l4_ick",
.addr = omap3xxx_dma_system_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_dma_system_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2420,8 +2104,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dma_system_slaves[] = {
static struct omap_hwmod omap3xxx_dma_system_hwmod = {
.name = "dma",
.class = &omap3xxx_dma_hwmod_class,
- .mpu_irqs = omap3xxx_dma_system_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_dma_system_irqs),
+ .mpu_irqs = omap2_dma_system_irqs,
.main_clk = "core_l3_ick",
.prcm = {
.omap2 = {
@@ -2466,11 +2149,7 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp1_irqs[] = {
{ .name = "irq", .irq = 16 },
{ .name = "tx", .irq = 59 },
{ .name = "rx", .irq = 60 },
-};
-
-static struct omap_hwmod_dma_info omap3xxx_mcbsp1_sdma_chs[] = {
- { .name = "rx", .dma_req = 32 },
- { .name = "tx", .dma_req = 31 },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = {
@@ -2480,6 +2159,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = {
.pa_end = 0x480740ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> mcbsp1 */
@@ -2488,7 +2168,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = {
.slave = &omap3xxx_mcbsp1_hwmod,
.clk = "mcbsp1_ick",
.addr = omap3xxx_mcbsp1_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2501,9 +2180,7 @@ static struct omap_hwmod omap3xxx_mcbsp1_hwmod = {
.name = "mcbsp1",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_irqs),
- .sdma_reqs = omap3xxx_mcbsp1_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp1_sdma_chs),
+ .sdma_reqs = omap2_mcbsp1_sdma_reqs,
.main_clk = "mcbsp1_fck",
.prcm = {
.omap2 = {
@@ -2524,11 +2201,7 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp2_irqs[] = {
{ .name = "irq", .irq = 17 },
{ .name = "tx", .irq = 62 },
{ .name = "rx", .irq = 63 },
-};
-
-static struct omap_hwmod_dma_info omap3xxx_mcbsp2_sdma_chs[] = {
- { .name = "rx", .dma_req = 34 },
- { .name = "tx", .dma_req = 33 },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = {
@@ -2538,6 +2211,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = {
.pa_end = 0x490220ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcbsp2 */
@@ -2546,7 +2220,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2 = {
.slave = &omap3xxx_mcbsp2_hwmod,
.clk = "mcbsp2_ick",
.addr = omap3xxx_mcbsp2_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2563,9 +2236,7 @@ static struct omap_hwmod omap3xxx_mcbsp2_hwmod = {
.name = "mcbsp2",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_irqs),
- .sdma_reqs = omap3xxx_mcbsp2_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sdma_chs),
+ .sdma_reqs = omap2_mcbsp2_sdma_reqs,
.main_clk = "mcbsp2_fck",
.prcm = {
.omap2 = {
@@ -2587,11 +2258,7 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp3_irqs[] = {
{ .name = "irq", .irq = 22 },
{ .name = "tx", .irq = 89 },
{ .name = "rx", .irq = 90 },
-};
-
-static struct omap_hwmod_dma_info omap3xxx_mcbsp3_sdma_chs[] = {
- { .name = "rx", .dma_req = 18 },
- { .name = "tx", .dma_req = 17 },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = {
@@ -2601,6 +2268,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = {
.pa_end = 0x490240ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcbsp3 */
@@ -2609,7 +2277,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3 = {
.slave = &omap3xxx_mcbsp3_hwmod,
.clk = "mcbsp3_ick",
.addr = omap3xxx_mcbsp3_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2626,9 +2293,7 @@ static struct omap_hwmod omap3xxx_mcbsp3_hwmod = {
.name = "mcbsp3",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_irqs),
- .sdma_reqs = omap3xxx_mcbsp3_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sdma_chs),
+ .sdma_reqs = omap2_mcbsp3_sdma_reqs,
.main_clk = "mcbsp3_fck",
.prcm = {
.omap2 = {
@@ -2650,11 +2315,13 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp4_irqs[] = {
{ .name = "irq", .irq = 23 },
{ .name = "tx", .irq = 54 },
{ .name = "rx", .irq = 55 },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap3xxx_mcbsp4_sdma_chs[] = {
{ .name = "rx", .dma_req = 20 },
{ .name = "tx", .dma_req = 19 },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = {
@@ -2664,6 +2331,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = {
.pa_end = 0x490260ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcbsp4 */
@@ -2672,7 +2340,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp4 = {
.slave = &omap3xxx_mcbsp4_hwmod,
.clk = "mcbsp4_ick",
.addr = omap3xxx_mcbsp4_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2685,9 +2352,7 @@ static struct omap_hwmod omap3xxx_mcbsp4_hwmod = {
.name = "mcbsp4",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_irqs),
.sdma_reqs = omap3xxx_mcbsp4_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp4_sdma_chs),
.main_clk = "mcbsp4_fck",
.prcm = {
.omap2 = {
@@ -2708,11 +2373,13 @@ static struct omap_hwmod_irq_info omap3xxx_mcbsp5_irqs[] = {
{ .name = "irq", .irq = 27 },
{ .name = "tx", .irq = 81 },
{ .name = "rx", .irq = 82 },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap3xxx_mcbsp5_sdma_chs[] = {
{ .name = "rx", .dma_req = 22 },
{ .name = "tx", .dma_req = 21 },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = {
@@ -2722,6 +2389,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = {
.pa_end = 0x480960ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_core -> mcbsp5 */
@@ -2730,7 +2398,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp5 = {
.slave = &omap3xxx_mcbsp5_hwmod,
.clk = "mcbsp5_ick",
.addr = omap3xxx_mcbsp5_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2743,9 +2410,7 @@ static struct omap_hwmod omap3xxx_mcbsp5_hwmod = {
.name = "mcbsp5",
.class = &omap3xxx_mcbsp_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp5_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_irqs),
.sdma_reqs = omap3xxx_mcbsp5_sdma_chs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp5_sdma_chs),
.main_clk = "mcbsp5_fck",
.prcm = {
.omap2 = {
@@ -2776,6 +2441,7 @@ static struct omap_hwmod_class omap3xxx_mcbsp_sidetone_hwmod_class = {
/* mcbsp2_sidetone */
static struct omap_hwmod_irq_info omap3xxx_mcbsp2_sidetone_irqs[] = {
{ .name = "irq", .irq = 4 },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = {
@@ -2785,6 +2451,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = {
.pa_end = 0x490280ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcbsp2_sidetone */
@@ -2793,7 +2460,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2_sidetone = {
.slave = &omap3xxx_mcbsp2_sidetone_hwmod,
.clk = "mcbsp2_ick",
.addr = omap3xxx_mcbsp2_sidetone_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_addrs),
.user = OCP_USER_MPU,
};
@@ -2806,7 +2472,6 @@ static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
.name = "mcbsp2_sidetone",
.class = &omap3xxx_mcbsp_sidetone_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp2_sidetone_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp2_sidetone_irqs),
.main_clk = "mcbsp2_fck",
.prcm = {
.omap2 = {
@@ -2825,6 +2490,7 @@ static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
/* mcbsp3_sidetone */
static struct omap_hwmod_irq_info omap3xxx_mcbsp3_sidetone_irqs[] = {
{ .name = "irq", .irq = 5 },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = {
@@ -2834,6 +2500,7 @@ static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = {
.pa_end = 0x4902A0ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcbsp3_sidetone */
@@ -2842,7 +2509,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3_sidetone = {
.slave = &omap3xxx_mcbsp3_sidetone_hwmod,
.clk = "mcbsp3_ick",
.addr = omap3xxx_mcbsp3_sidetone_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_addrs),
.user = OCP_USER_MPU,
};
@@ -2855,7 +2521,6 @@ static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = {
.name = "mcbsp3_sidetone",
.class = &omap3xxx_mcbsp_sidetone_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp3_sidetone_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mcbsp3_sidetone_irqs),
.main_clk = "mcbsp3_fck",
.prcm = {
.omap2 = {
@@ -3025,6 +2690,7 @@ static struct omap_hwmod_class omap3xxx_mailbox_hwmod_class = {
static struct omap_hwmod omap3xxx_mailbox_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mailbox_irqs[] = {
{ .irq = 26 },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap3xxx_mailbox_addrs[] = {
@@ -3033,6 +2699,7 @@ static struct omap_hwmod_addr_space omap3xxx_mailbox_addrs[] = {
.pa_end = 0x480941ff,
.flags = ADDR_TYPE_RT,
},
+ { }
};
/* l4_core -> mailbox */
@@ -3040,7 +2707,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__mailbox = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap3xxx_mailbox_hwmod,
.addr = omap3xxx_mailbox_addrs,
- .addr_cnt = ARRAY_SIZE(omap3xxx_mailbox_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3053,7 +2719,6 @@ static struct omap_hwmod omap3xxx_mailbox_hwmod = {
.name = "mailbox",
.class = &omap3xxx_mailbox_hwmod_class,
.mpu_irqs = omap3xxx_mailbox_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_mailbox_irqs),
.main_clk = "mailboxes_ick",
.prcm = {
.omap2 = {
@@ -3070,56 +2735,29 @@ static struct omap_hwmod omap3xxx_mailbox_hwmod = {
};
/* l4 core -> mcspi1 interface */
-static struct omap_hwmod_addr_space omap34xx_mcspi1_addr_space[] = {
- {
- .pa_start = 0x48098000,
- .pa_end = 0x480980ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_mcspi1,
.clk = "mcspi1_ick",
- .addr = omap34xx_mcspi1_addr_space,
- .addr_cnt = ARRAY_SIZE(omap34xx_mcspi1_addr_space),
+ .addr = omap2_mcspi1_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4 core -> mcspi2 interface */
-static struct omap_hwmod_addr_space omap34xx_mcspi2_addr_space[] = {
- {
- .pa_start = 0x4809a000,
- .pa_end = 0x4809a0ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_mcspi2,
.clk = "mcspi2_ick",
- .addr = omap34xx_mcspi2_addr_space,
- .addr_cnt = ARRAY_SIZE(omap34xx_mcspi2_addr_space),
+ .addr = omap2_mcspi2_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
/* l4 core -> mcspi3 interface */
-static struct omap_hwmod_addr_space omap34xx_mcspi3_addr_space[] = {
- {
- .pa_start = 0x480b8000,
- .pa_end = 0x480b80ff,
- .flags = ADDR_TYPE_RT,
- },
-};
-
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_mcspi3,
.clk = "mcspi3_ick",
- .addr = omap34xx_mcspi3_addr_space,
- .addr_cnt = ARRAY_SIZE(omap34xx_mcspi3_addr_space),
+ .addr = omap2430_mcspi3_addr_space,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3130,6 +2768,7 @@ static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = {
.pa_end = 0x480ba0ff,
.flags = ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
@@ -3137,7 +2776,6 @@ static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
.slave = &omap34xx_mcspi4,
.clk = "mcspi4_ick",
.addr = omap34xx_mcspi4_addr_space,
- .addr_cnt = ARRAY_SIZE(omap34xx_mcspi4_addr_space),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3165,21 +2803,6 @@ static struct omap_hwmod_class omap34xx_mcspi_class = {
};
/* mcspi1 */
-static struct omap_hwmod_irq_info omap34xx_mcspi1_mpu_irqs[] = {
- { .name = "irq", .irq = 65 },
-};
-
-static struct omap_hwmod_dma_info omap34xx_mcspi1_sdma_reqs[] = {
- { .name = "tx0", .dma_req = 35 },
- { .name = "rx0", .dma_req = 36 },
- { .name = "tx1", .dma_req = 37 },
- { .name = "rx1", .dma_req = 38 },
- { .name = "tx2", .dma_req = 39 },
- { .name = "rx2", .dma_req = 40 },
- { .name = "tx3", .dma_req = 41 },
- { .name = "rx3", .dma_req = 42 },
-};
-
static struct omap_hwmod_ocp_if *omap34xx_mcspi1_slaves[] = {
&omap34xx_l4_core__mcspi1,
};
@@ -3190,10 +2813,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
static struct omap_hwmod omap34xx_mcspi1 = {
.name = "mcspi1",
- .mpu_irqs = omap34xx_mcspi1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi1_mpu_irqs),
- .sdma_reqs = omap34xx_mcspi1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi1_sdma_reqs),
+ .mpu_irqs = omap2_mcspi1_mpu_irqs,
+ .sdma_reqs = omap2_mcspi1_sdma_reqs,
.main_clk = "mcspi1_fck",
.prcm = {
.omap2 = {
@@ -3212,17 +2833,6 @@ static struct omap_hwmod omap34xx_mcspi1 = {
};
/* mcspi2 */
-static struct omap_hwmod_irq_info omap34xx_mcspi2_mpu_irqs[] = {
- { .name = "irq", .irq = 66 },
-};
-
-static struct omap_hwmod_dma_info omap34xx_mcspi2_sdma_reqs[] = {
- { .name = "tx0", .dma_req = 43 },
- { .name = "rx0", .dma_req = 44 },
- { .name = "tx1", .dma_req = 45 },
- { .name = "rx1", .dma_req = 46 },
-};
-
static struct omap_hwmod_ocp_if *omap34xx_mcspi2_slaves[] = {
&omap34xx_l4_core__mcspi2,
};
@@ -3233,10 +2843,8 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
static struct omap_hwmod omap34xx_mcspi2 = {
.name = "mcspi2",
- .mpu_irqs = omap34xx_mcspi2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi2_mpu_irqs),
- .sdma_reqs = omap34xx_mcspi2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi2_sdma_reqs),
+ .mpu_irqs = omap2_mcspi2_mpu_irqs,
+ .sdma_reqs = omap2_mcspi2_sdma_reqs,
.main_clk = "mcspi2_fck",
.prcm = {
.omap2 = {
@@ -3257,6 +2865,7 @@ static struct omap_hwmod omap34xx_mcspi2 = {
/* mcspi3 */
static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = {
{ .name = "irq", .irq = 91 }, /* 91 */
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
@@ -3264,6 +2873,7 @@ static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
{ .name = "rx0", .dma_req = 16 },
{ .name = "tx1", .dma_req = 23 },
{ .name = "rx1", .dma_req = 24 },
+ { .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap34xx_mcspi3_slaves[] = {
@@ -3277,9 +2887,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
static struct omap_hwmod omap34xx_mcspi3 = {
.name = "mcspi3",
.mpu_irqs = omap34xx_mcspi3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi3_mpu_irqs),
.sdma_reqs = omap34xx_mcspi3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi3_sdma_reqs),
.main_clk = "mcspi3_fck",
.prcm = {
.omap2 = {
@@ -3300,11 +2908,13 @@ static struct omap_hwmod omap34xx_mcspi3 = {
/* SPI4 */
static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = {
{ .name = "irq", .irq = INT_34XX_SPI4_IRQ }, /* 48 */
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = {
{ .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */
{ .name = "rx0", .dma_req = 71 }, /* DMA_SPI4_RX0 */
+ { .dma_req = -1 }
};
static struct omap_hwmod_ocp_if *omap34xx_mcspi4_slaves[] = {
@@ -3318,9 +2928,7 @@ static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = {
static struct omap_hwmod omap34xx_mcspi4 = {
.name = "mcspi4",
.mpu_irqs = omap34xx_mcspi4_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mcspi4_mpu_irqs),
.sdma_reqs = omap34xx_mcspi4_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mcspi4_sdma_reqs),
.main_clk = "mcspi4_fck",
.prcm = {
.omap2 = {
@@ -3362,12 +2970,12 @@ static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = {
{ .name = "mc", .irq = 92 },
{ .name = "dma", .irq = 93 },
+ { .irq = -1 }
};
static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
.name = "usb_otg_hs",
.mpu_irqs = omap3xxx_usbhsotg_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap3xxx_usbhsotg_mpu_irqs),
.main_clk = "hsotgusb_ick",
.prcm = {
.omap2 = {
@@ -3399,6 +3007,7 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = {
{ .name = "mc", .irq = 71 },
+ { .irq = -1 }
};
static struct omap_hwmod_class am35xx_usbotg_class = {
@@ -3409,7 +3018,6 @@ static struct omap_hwmod_class am35xx_usbotg_class = {
static struct omap_hwmod am35xx_usbhsotg_hwmod = {
.name = "am35x_otg_hs",
.mpu_irqs = am35xx_usbhsotg_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(am35xx_usbhsotg_mpu_irqs),
.main_clk = NULL,
.prcm = {
.omap2 = {
@@ -3445,11 +3053,13 @@ static struct omap_hwmod_class omap34xx_mmc_class = {
static struct omap_hwmod_irq_info omap34xx_mmc1_mpu_irqs[] = {
{ .irq = 83, },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mmc1_sdma_reqs[] = {
{ .name = "tx", .dma_req = 61, },
{ .name = "rx", .dma_req = 62, },
+ { .dma_req = -1 }
};
static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = {
@@ -3467,9 +3077,7 @@ static struct omap_mmc_dev_attr mmc1_dev_attr = {
static struct omap_hwmod omap3xxx_mmc1_hwmod = {
.name = "mmc1",
.mpu_irqs = omap34xx_mmc1_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mmc1_mpu_irqs),
.sdma_reqs = omap34xx_mmc1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mmc1_sdma_reqs),
.opt_clks = omap34xx_mmc1_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc1_opt_clks),
.main_clk = "mmchs1_fck",
@@ -3493,11 +3101,13 @@ static struct omap_hwmod omap3xxx_mmc1_hwmod = {
static struct omap_hwmod_irq_info omap34xx_mmc2_mpu_irqs[] = {
{ .irq = INT_24XX_MMC2_IRQ, },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mmc2_sdma_reqs[] = {
{ .name = "tx", .dma_req = 47, },
{ .name = "rx", .dma_req = 48, },
+ { .dma_req = -1 }
};
static struct omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = {
@@ -3511,9 +3121,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_mmc2_slaves[] = {
static struct omap_hwmod omap3xxx_mmc2_hwmod = {
.name = "mmc2",
.mpu_irqs = omap34xx_mmc2_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mmc2_mpu_irqs),
.sdma_reqs = omap34xx_mmc2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mmc2_sdma_reqs),
.opt_clks = omap34xx_mmc2_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc2_opt_clks),
.main_clk = "mmchs2_fck",
@@ -3536,11 +3144,13 @@ static struct omap_hwmod omap3xxx_mmc2_hwmod = {
static struct omap_hwmod_irq_info omap34xx_mmc3_mpu_irqs[] = {
{ .irq = 94, },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap34xx_mmc3_sdma_reqs[] = {
{ .name = "tx", .dma_req = 77, },
{ .name = "rx", .dma_req = 78, },
+ { .dma_req = -1 }
};
static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = {
@@ -3554,9 +3164,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_mmc3_slaves[] = {
static struct omap_hwmod omap3xxx_mmc3_hwmod = {
.name = "mmc3",
.mpu_irqs = omap34xx_mmc3_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap34xx_mmc3_mpu_irqs),
.sdma_reqs = omap34xx_mmc3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap34xx_mmc3_sdma_reqs),
.opt_clks = omap34xx_mmc3_opt_clks,
.opt_clks_cnt = ARRAY_SIZE(omap34xx_mmc3_opt_clks),
.main_clk = "mmchs3_fck",
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index e1c69ffe0f69..e01143725b08 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -80,7 +80,12 @@ static struct omap_hwmod_class omap44xx_dmm_hwmod_class = {
.name = "dmm",
};
-/* dmm interface data */
+/* dmm */
+static struct omap_hwmod_irq_info omap44xx_dmm_irqs[] = {
+ { .irq = 113 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
+};
+
/* l3_main_1 -> dmm */
static struct omap_hwmod_ocp_if omap44xx_l3_main_1__dmm = {
.master = &omap44xx_l3_main_1_hwmod,
@@ -95,6 +100,7 @@ static struct omap_hwmod_addr_space omap44xx_dmm_addrs[] = {
.pa_end = 0x4e0007ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* mpu -> dmm */
@@ -103,7 +109,6 @@ static struct omap_hwmod_ocp_if omap44xx_mpu__dmm = {
.slave = &omap44xx_dmm_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_dmm_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dmm_addrs),
.user = OCP_USER_MPU,
};
@@ -113,17 +118,12 @@ static struct omap_hwmod_ocp_if *omap44xx_dmm_slaves[] = {
&omap44xx_mpu__dmm,
};
-static struct omap_hwmod_irq_info omap44xx_dmm_irqs[] = {
- { .irq = 113 + OMAP44XX_IRQ_GIC_START },
-};
-
static struct omap_hwmod omap44xx_dmm_hwmod = {
.name = "dmm",
.class = &omap44xx_dmm_hwmod_class,
+ .mpu_irqs = omap44xx_dmm_irqs,
.slaves = omap44xx_dmm_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_dmm_slaves),
- .mpu_irqs = omap44xx_dmm_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dmm_irqs),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
@@ -135,7 +135,7 @@ static struct omap_hwmod_class omap44xx_emif_fw_hwmod_class = {
.name = "emif_fw",
};
-/* emif_fw interface data */
+/* emif_fw */
/* dmm -> emif_fw */
static struct omap_hwmod_ocp_if omap44xx_dmm__emif_fw = {
.master = &omap44xx_dmm_hwmod,
@@ -150,6 +150,7 @@ static struct omap_hwmod_addr_space omap44xx_emif_fw_addrs[] = {
.pa_end = 0x4a20c0ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> emif_fw */
@@ -158,7 +159,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__emif_fw = {
.slave = &omap44xx_emif_fw_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_emif_fw_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_emif_fw_addrs),
.user = OCP_USER_MPU,
};
@@ -184,7 +184,7 @@ static struct omap_hwmod_class omap44xx_l3_hwmod_class = {
.name = "l3",
};
-/* l3_instr interface data */
+/* l3_instr */
/* iva -> l3_instr */
static struct omap_hwmod_ocp_if omap44xx_iva__l3_instr = {
.master = &omap44xx_iva_hwmod,
@@ -215,7 +215,13 @@ static struct omap_hwmod omap44xx_l3_instr_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
-/* l3_main_1 interface data */
+/* l3_main_1 */
+static struct omap_hwmod_irq_info omap44xx_l3_main_1_irqs[] = {
+ { .name = "dbg_err", .irq = 9 + OMAP44XX_IRQ_GIC_START },
+ { .name = "app_err", .irq = 10 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
+};
+
/* dsp -> l3_main_1 */
static struct omap_hwmod_ocp_if omap44xx_dsp__l3_main_1 = {
.master = &omap44xx_dsp_hwmod,
@@ -264,18 +270,13 @@ static struct omap_hwmod_ocp_if omap44xx_mmc2__l3_main_1 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* L3 target configuration and error log registers */
-static struct omap_hwmod_irq_info omap44xx_l3_targ_irqs[] = {
- { .irq = 9 + OMAP44XX_IRQ_GIC_START },
- { .irq = 10 + OMAP44XX_IRQ_GIC_START },
-};
-
static struct omap_hwmod_addr_space omap44xx_l3_main_1_addrs[] = {
{
.pa_start = 0x44000000,
.pa_end = 0x44000fff,
- .flags = ADDR_TYPE_RT,
+ .flags = ADDR_TYPE_RT
},
+ { }
};
/* mpu -> l3_main_1 */
@@ -284,8 +285,7 @@ static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = {
.slave = &omap44xx_l3_main_1_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_l3_main_1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_l3_main_1_addrs),
- .user = OCP_USER_MPU | OCP_USER_SDMA,
+ .user = OCP_USER_MPU,
};
/* l3_main_1 slave ports */
@@ -302,14 +302,13 @@ static struct omap_hwmod_ocp_if *omap44xx_l3_main_1_slaves[] = {
static struct omap_hwmod omap44xx_l3_main_1_hwmod = {
.name = "l3_main_1",
.class = &omap44xx_l3_hwmod_class,
- .mpu_irqs = omap44xx_l3_targ_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_l3_targ_irqs),
+ .mpu_irqs = omap44xx_l3_main_1_irqs,
.slaves = omap44xx_l3_main_1_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_1_slaves),
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
-/* l3_main_2 interface data */
+/* l3_main_2 */
/* dma_system -> l3_main_2 */
static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
.master = &omap44xx_dma_system_hwmod,
@@ -354,8 +353,9 @@ static struct omap_hwmod_addr_space omap44xx_l3_main_2_addrs[] = {
{
.pa_start = 0x44800000,
.pa_end = 0x44801fff,
- .flags = ADDR_TYPE_RT,
+ .flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_1 -> l3_main_2 */
@@ -364,8 +364,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_2 = {
.slave = &omap44xx_l3_main_2_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_l3_main_2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_l3_main_2_addrs),
- .user = OCP_USER_MPU | OCP_USER_SDMA,
+ .user = OCP_USER_MPU,
};
/* l4_cfg -> l3_main_2 */
@@ -404,13 +403,14 @@ static struct omap_hwmod omap44xx_l3_main_2_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
-/* l3_main_3 interface data */
+/* l3_main_3 */
static struct omap_hwmod_addr_space omap44xx_l3_main_3_addrs[] = {
{
.pa_start = 0x45000000,
.pa_end = 0x45000fff,
- .flags = ADDR_TYPE_RT,
+ .flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_1 -> l3_main_3 */
@@ -419,8 +419,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_3 = {
.slave = &omap44xx_l3_main_3_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_l3_main_3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_l3_main_3_addrs),
- .user = OCP_USER_MPU | OCP_USER_SDMA,
+ .user = OCP_USER_MPU,
};
/* l3_main_2 -> l3_main_3 */
@@ -462,7 +461,7 @@ static struct omap_hwmod_class omap44xx_l4_hwmod_class = {
.name = "l4",
};
-/* l4_abe interface data */
+/* l4_abe */
/* aess -> l4_abe */
static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
.master = &omap44xx_aess_hwmod,
@@ -511,7 +510,7 @@ static struct omap_hwmod omap44xx_l4_abe_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
-/* l4_cfg interface data */
+/* l4_cfg */
/* l3_main_1 -> l4_cfg */
static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_cfg = {
.master = &omap44xx_l3_main_1_hwmod,
@@ -533,7 +532,7 @@ static struct omap_hwmod omap44xx_l4_cfg_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
-/* l4_per interface data */
+/* l4_per */
/* l3_main_2 -> l4_per */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l4_per = {
.master = &omap44xx_l3_main_2_hwmod,
@@ -555,7 +554,7 @@ static struct omap_hwmod omap44xx_l4_per_hwmod = {
.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
-/* l4_wkup interface data */
+/* l4_wkup */
/* l4_cfg -> l4_wkup */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l4_wkup = {
.master = &omap44xx_l4_cfg_hwmod,
@@ -585,7 +584,7 @@ static struct omap_hwmod_class omap44xx_mpu_bus_hwmod_class = {
.name = "mpu_bus",
};
-/* mpu_private interface data */
+/* mpu_private */
/* mpu -> mpu_private */
static struct omap_hwmod_ocp_if omap44xx_mpu__mpu_private = {
.master = &omap44xx_mpu_hwmod,
@@ -633,7 +632,9 @@ static struct omap_hwmod omap44xx_mpu_private_hwmod = {
* gpmc
* gpu
* hdq1w
- * hsi
+ * mcasp
+ * mpu_c0
+ * mpu_c1
* ocmc_ram
* ocp2scp_usb_phy
* ocp_wp_noc
@@ -660,7 +661,8 @@ static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = {
.sysc_offs = 0x0010,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART |
+ MSTANDBY_SMART_WKUP),
.sysc_fields = &omap_hwmod_sysc_type2,
};
@@ -672,6 +674,7 @@ static struct omap_hwmod_class omap44xx_aess_hwmod_class = {
/* aess */
static struct omap_hwmod_irq_info omap44xx_aess_irqs[] = {
{ .irq = 99 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_aess_sdma_reqs[] = {
@@ -683,6 +686,7 @@ static struct omap_hwmod_dma_info omap44xx_aess_sdma_reqs[] = {
{ .name = "fifo5", .dma_req = 105 + OMAP44XX_DMA_REQ_START },
{ .name = "fifo6", .dma_req = 106 + OMAP44XX_DMA_REQ_START },
{ .name = "fifo7", .dma_req = 107 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
/* aess master ports */
@@ -696,6 +700,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
.pa_end = 0x401f13ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> aess */
@@ -704,7 +709,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
.slave = &omap44xx_aess_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_aess_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_aess_addrs),
.user = OCP_USER_MPU,
};
@@ -714,6 +718,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
.pa_end = 0x490f13ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> aess (dma) */
@@ -722,7 +727,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
.slave = &omap44xx_aess_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_aess_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_aess_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -736,11 +740,9 @@ static struct omap_hwmod omap44xx_aess_hwmod = {
.name = "aess",
.class = &omap44xx_aess_hwmod_class,
.mpu_irqs = omap44xx_aess_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_aess_irqs),
.sdma_reqs = omap44xx_aess_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_aess_sdma_reqs),
.main_clk = "aess_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
},
@@ -769,7 +771,7 @@ static struct omap_hwmod_opt_clk bandgap_opt_clks[] = {
static struct omap_hwmod omap44xx_bandgap_hwmod = {
.name = "bandgap",
.class = &omap44xx_bandgap_hwmod_class,
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_WKUP_BANDGAP_CLKCTRL,
},
@@ -806,6 +808,7 @@ static struct omap_hwmod_addr_space omap44xx_counter_32k_addrs[] = {
.pa_end = 0x4a30401f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_wkup -> counter_32k */
@@ -814,7 +817,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__counter_32k = {
.slave = &omap44xx_counter_32k_hwmod,
.clk = "l4_wkup_clk_mux_ck",
.addr = omap44xx_counter_32k_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_counter_32k_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -828,7 +830,7 @@ static struct omap_hwmod omap44xx_counter_32k_hwmod = {
.class = &omap44xx_counter_hwmod_class,
.flags = HWMOD_SWSUP_SIDLE,
.main_clk = "sys_32k_ck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL,
},
@@ -875,6 +877,7 @@ static struct omap_hwmod_irq_info omap44xx_dma_system_irqs[] = {
{ .name = "1", .irq = 13 + OMAP44XX_IRQ_GIC_START },
{ .name = "2", .irq = 14 + OMAP44XX_IRQ_GIC_START },
{ .name = "3", .irq = 15 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
/* dma_system master ports */
@@ -888,6 +891,7 @@ static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = {
.pa_end = 0x4a056fff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> dma_system */
@@ -896,7 +900,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dma_system = {
.slave = &omap44xx_dma_system_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_dma_system_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dma_system_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -909,7 +912,6 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
.name = "dma_system",
.class = &omap44xx_dma_hwmod_class,
.mpu_irqs = omap44xx_dma_system_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dma_system_irqs),
.main_clk = "l3_div_ck",
.prcm = {
.omap4 = {
@@ -948,10 +950,12 @@ static struct omap_hwmod_class omap44xx_dmic_hwmod_class = {
static struct omap_hwmod omap44xx_dmic_hwmod;
static struct omap_hwmod_irq_info omap44xx_dmic_irqs[] = {
{ .irq = 114 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
{ .dma_req = 66 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
@@ -960,6 +964,7 @@ static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
.pa_end = 0x4012e07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> dmic */
@@ -968,7 +973,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
.slave = &omap44xx_dmic_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_dmic_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dmic_addrs),
.user = OCP_USER_MPU,
};
@@ -978,6 +982,7 @@ static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
.pa_end = 0x4902e07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> dmic (dma) */
@@ -986,7 +991,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic_dma = {
.slave = &omap44xx_dmic_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_dmic_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dmic_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -1000,11 +1004,9 @@ static struct omap_hwmod omap44xx_dmic_hwmod = {
.name = "dmic",
.class = &omap44xx_dmic_hwmod_class,
.mpu_irqs = omap44xx_dmic_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dmic_irqs),
.sdma_reqs = omap44xx_dmic_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dmic_sdma_reqs),
.main_clk = "dmic_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
},
@@ -1026,6 +1028,7 @@ static struct omap_hwmod_class omap44xx_dsp_hwmod_class = {
/* dsp */
static struct omap_hwmod_irq_info omap44xx_dsp_irqs[] = {
{ .irq = 28 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_rst_info omap44xx_dsp_resets[] = {
@@ -1082,7 +1085,6 @@ static struct omap_hwmod omap44xx_dsp_hwmod = {
.name = "dsp",
.class = &omap44xx_dsp_hwmod_class,
.mpu_irqs = omap44xx_dsp_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dsp_irqs),
.rst_lines = omap44xx_dsp_resets,
.rst_lines_cnt = ARRAY_SIZE(omap44xx_dsp_resets),
.main_clk = "dsp_fck",
@@ -1127,6 +1129,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dma_addrs[] = {
.pa_end = 0x5800007f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> dss */
@@ -1135,7 +1138,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss = {
.slave = &omap44xx_dss_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_dss_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -1145,6 +1147,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_addrs[] = {
.pa_end = 0x4804007f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> dss */
@@ -1153,7 +1156,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss = {
.slave = &omap44xx_dss_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_dss_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_addrs),
.user = OCP_USER_MPU,
};
@@ -1215,10 +1217,12 @@ static struct omap_hwmod_class omap44xx_dispc_hwmod_class = {
static struct omap_hwmod omap44xx_dss_dispc_hwmod;
static struct omap_hwmod_irq_info omap44xx_dss_dispc_irqs[] = {
{ .irq = 25 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_dss_dispc_sdma_reqs[] = {
{ .dma_req = 5 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_dss_dispc_dma_addrs[] = {
@@ -1227,6 +1231,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_dma_addrs[] = {
.pa_end = 0x58001fff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> dss_dispc */
@@ -1235,7 +1240,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dispc = {
.slave = &omap44xx_dss_dispc_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_dss_dispc_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_dispc_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -1245,6 +1249,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
.pa_end = 0x48041fff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> dss_dispc */
@@ -1253,7 +1258,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
.slave = &omap44xx_dss_dispc_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_dss_dispc_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_dispc_addrs),
.user = OCP_USER_MPU,
};
@@ -1267,9 +1271,7 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.name = "dss_dispc",
.class = &omap44xx_dispc_hwmod_class,
.mpu_irqs = omap44xx_dss_dispc_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dispc_irqs),
.sdma_reqs = omap44xx_dss_dispc_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_dispc_sdma_reqs),
.main_clk = "dss_fck",
.prcm = {
.omap4 = {
@@ -1306,10 +1308,12 @@ static struct omap_hwmod_class omap44xx_dsi_hwmod_class = {
static struct omap_hwmod omap44xx_dss_dsi1_hwmod;
static struct omap_hwmod_irq_info omap44xx_dss_dsi1_irqs[] = {
{ .irq = 53 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_dss_dsi1_sdma_reqs[] = {
{ .dma_req = 74 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_dss_dsi1_dma_addrs[] = {
@@ -1318,6 +1322,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dsi1_dma_addrs[] = {
.pa_end = 0x580041ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> dss_dsi1 */
@@ -1326,7 +1331,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi1 = {
.slave = &omap44xx_dss_dsi1_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_dss_dsi1_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -1336,6 +1340,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dsi1_addrs[] = {
.pa_end = 0x480441ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> dss_dsi1 */
@@ -1344,7 +1349,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi1 = {
.slave = &omap44xx_dss_dsi1_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_dss_dsi1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_addrs),
.user = OCP_USER_MPU,
};
@@ -1358,9 +1362,7 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
.name = "dss_dsi1",
.class = &omap44xx_dsi_hwmod_class,
.mpu_irqs = omap44xx_dss_dsi1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_irqs),
.sdma_reqs = omap44xx_dss_dsi1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_sdma_reqs),
.main_clk = "dss_fck",
.prcm = {
.omap4 = {
@@ -1376,10 +1378,12 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
static struct omap_hwmod omap44xx_dss_dsi2_hwmod;
static struct omap_hwmod_irq_info omap44xx_dss_dsi2_irqs[] = {
{ .irq = 84 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_dss_dsi2_sdma_reqs[] = {
{ .dma_req = 83 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_dss_dsi2_dma_addrs[] = {
@@ -1388,6 +1392,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dsi2_dma_addrs[] = {
.pa_end = 0x580051ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> dss_dsi2 */
@@ -1396,7 +1401,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi2 = {
.slave = &omap44xx_dss_dsi2_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_dss_dsi2_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -1406,6 +1410,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_dsi2_addrs[] = {
.pa_end = 0x480451ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> dss_dsi2 */
@@ -1414,7 +1419,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi2 = {
.slave = &omap44xx_dss_dsi2_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_dss_dsi2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_addrs),
.user = OCP_USER_MPU,
};
@@ -1428,9 +1432,7 @@ static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
.name = "dss_dsi2",
.class = &omap44xx_dsi_hwmod_class,
.mpu_irqs = omap44xx_dss_dsi2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_irqs),
.sdma_reqs = omap44xx_dss_dsi2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_sdma_reqs),
.main_clk = "dss_fck",
.prcm = {
.omap4 = {
@@ -1466,10 +1468,12 @@ static struct omap_hwmod_class omap44xx_hdmi_hwmod_class = {
static struct omap_hwmod omap44xx_dss_hdmi_hwmod;
static struct omap_hwmod_irq_info omap44xx_dss_hdmi_irqs[] = {
{ .irq = 101 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_dss_hdmi_sdma_reqs[] = {
{ .dma_req = 75 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_dss_hdmi_dma_addrs[] = {
@@ -1478,6 +1482,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_hdmi_dma_addrs[] = {
.pa_end = 0x58006fff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> dss_hdmi */
@@ -1486,7 +1491,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_hdmi = {
.slave = &omap44xx_dss_hdmi_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_dss_hdmi_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -1496,6 +1500,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_hdmi_addrs[] = {
.pa_end = 0x48046fff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> dss_hdmi */
@@ -1504,7 +1509,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_hdmi = {
.slave = &omap44xx_dss_hdmi_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_dss_hdmi_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_addrs),
.user = OCP_USER_MPU,
};
@@ -1518,9 +1522,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
.name = "dss_hdmi",
.class = &omap44xx_hdmi_hwmod_class,
.mpu_irqs = omap44xx_dss_hdmi_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_irqs),
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_sdma_reqs),
.main_clk = "dss_fck",
.prcm = {
.omap4 = {
@@ -1556,6 +1558,7 @@ static struct omap_hwmod_class omap44xx_rfbi_hwmod_class = {
static struct omap_hwmod omap44xx_dss_rfbi_hwmod;
static struct omap_hwmod_dma_info omap44xx_dss_rfbi_sdma_reqs[] = {
{ .dma_req = 13 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_dss_rfbi_dma_addrs[] = {
@@ -1564,6 +1567,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_rfbi_dma_addrs[] = {
.pa_end = 0x580020ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> dss_rfbi */
@@ -1572,7 +1576,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_rfbi = {
.slave = &omap44xx_dss_rfbi_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_dss_rfbi_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -1582,6 +1585,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_rfbi_addrs[] = {
.pa_end = 0x480420ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> dss_rfbi */
@@ -1590,7 +1594,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_rfbi = {
.slave = &omap44xx_dss_rfbi_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_dss_rfbi_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_addrs),
.user = OCP_USER_MPU,
};
@@ -1604,7 +1607,6 @@ static struct omap_hwmod omap44xx_dss_rfbi_hwmod = {
.name = "dss_rfbi",
.class = &omap44xx_rfbi_hwmod_class,
.sdma_reqs = omap44xx_dss_rfbi_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_sdma_reqs),
.main_clk = "dss_fck",
.prcm = {
.omap4 = {
@@ -1633,6 +1635,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_venc_dma_addrs[] = {
.pa_end = 0x580030ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> dss_venc */
@@ -1641,7 +1644,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_venc = {
.slave = &omap44xx_dss_venc_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_dss_venc_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_venc_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -1651,6 +1653,7 @@ static struct omap_hwmod_addr_space omap44xx_dss_venc_addrs[] = {
.pa_end = 0x480430ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> dss_venc */
@@ -1659,7 +1662,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_venc = {
.slave = &omap44xx_dss_venc_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_dss_venc_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_dss_venc_addrs),
.user = OCP_USER_MPU,
};
@@ -1716,6 +1718,7 @@ static struct omap_gpio_dev_attr gpio_dev_attr = {
static struct omap_hwmod omap44xx_gpio1_hwmod;
static struct omap_hwmod_irq_info omap44xx_gpio1_irqs[] = {
{ .irq = 29 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_gpio1_addrs[] = {
@@ -1724,6 +1727,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio1_addrs[] = {
.pa_end = 0x4a3101ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_wkup -> gpio1 */
@@ -1732,7 +1736,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__gpio1 = {
.slave = &omap44xx_gpio1_hwmod,
.clk = "l4_wkup_clk_mux_ck",
.addr = omap44xx_gpio1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_gpio1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1749,7 +1752,6 @@ static struct omap_hwmod omap44xx_gpio1_hwmod = {
.name = "gpio1",
.class = &omap44xx_gpio_hwmod_class,
.mpu_irqs = omap44xx_gpio1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_gpio1_irqs),
.main_clk = "gpio1_ick",
.prcm = {
.omap4 = {
@@ -1768,6 +1770,7 @@ static struct omap_hwmod omap44xx_gpio1_hwmod = {
static struct omap_hwmod omap44xx_gpio2_hwmod;
static struct omap_hwmod_irq_info omap44xx_gpio2_irqs[] = {
{ .irq = 30 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_gpio2_addrs[] = {
@@ -1776,6 +1779,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio2_addrs[] = {
.pa_end = 0x480551ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> gpio2 */
@@ -1784,7 +1788,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio2 = {
.slave = &omap44xx_gpio2_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_gpio2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_gpio2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1802,7 +1805,6 @@ static struct omap_hwmod omap44xx_gpio2_hwmod = {
.class = &omap44xx_gpio_hwmod_class,
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap44xx_gpio2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_gpio2_irqs),
.main_clk = "gpio2_ick",
.prcm = {
.omap4 = {
@@ -1821,6 +1823,7 @@ static struct omap_hwmod omap44xx_gpio2_hwmod = {
static struct omap_hwmod omap44xx_gpio3_hwmod;
static struct omap_hwmod_irq_info omap44xx_gpio3_irqs[] = {
{ .irq = 31 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_gpio3_addrs[] = {
@@ -1829,6 +1832,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio3_addrs[] = {
.pa_end = 0x480571ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> gpio3 */
@@ -1837,7 +1841,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio3 = {
.slave = &omap44xx_gpio3_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_gpio3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_gpio3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1855,7 +1858,6 @@ static struct omap_hwmod omap44xx_gpio3_hwmod = {
.class = &omap44xx_gpio_hwmod_class,
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap44xx_gpio3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_gpio3_irqs),
.main_clk = "gpio3_ick",
.prcm = {
.omap4 = {
@@ -1874,6 +1876,7 @@ static struct omap_hwmod omap44xx_gpio3_hwmod = {
static struct omap_hwmod omap44xx_gpio4_hwmod;
static struct omap_hwmod_irq_info omap44xx_gpio4_irqs[] = {
{ .irq = 32 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_gpio4_addrs[] = {
@@ -1882,6 +1885,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio4_addrs[] = {
.pa_end = 0x480591ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> gpio4 */
@@ -1890,7 +1894,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio4 = {
.slave = &omap44xx_gpio4_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_gpio4_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_gpio4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1908,7 +1911,6 @@ static struct omap_hwmod omap44xx_gpio4_hwmod = {
.class = &omap44xx_gpio_hwmod_class,
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap44xx_gpio4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_gpio4_irqs),
.main_clk = "gpio4_ick",
.prcm = {
.omap4 = {
@@ -1927,6 +1929,7 @@ static struct omap_hwmod omap44xx_gpio4_hwmod = {
static struct omap_hwmod omap44xx_gpio5_hwmod;
static struct omap_hwmod_irq_info omap44xx_gpio5_irqs[] = {
{ .irq = 33 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_gpio5_addrs[] = {
@@ -1935,6 +1938,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio5_addrs[] = {
.pa_end = 0x4805b1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> gpio5 */
@@ -1943,7 +1947,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio5 = {
.slave = &omap44xx_gpio5_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_gpio5_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_gpio5_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -1961,7 +1964,6 @@ static struct omap_hwmod omap44xx_gpio5_hwmod = {
.class = &omap44xx_gpio_hwmod_class,
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap44xx_gpio5_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_gpio5_irqs),
.main_clk = "gpio5_ick",
.prcm = {
.omap4 = {
@@ -1980,6 +1982,7 @@ static struct omap_hwmod omap44xx_gpio5_hwmod = {
static struct omap_hwmod omap44xx_gpio6_hwmod;
static struct omap_hwmod_irq_info omap44xx_gpio6_irqs[] = {
{ .irq = 34 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_gpio6_addrs[] = {
@@ -1988,6 +1991,7 @@ static struct omap_hwmod_addr_space omap44xx_gpio6_addrs[] = {
.pa_end = 0x4805d1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> gpio6 */
@@ -1996,7 +2000,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio6 = {
.slave = &omap44xx_gpio6_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_gpio6_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_gpio6_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2014,7 +2017,6 @@ static struct omap_hwmod omap44xx_gpio6_hwmod = {
.class = &omap44xx_gpio_hwmod_class,
.flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.mpu_irqs = omap44xx_gpio6_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_gpio6_irqs),
.main_clk = "gpio6_ick",
.prcm = {
.omap4 = {
@@ -2044,7 +2046,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_hsi_sysc = {
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART),
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -2058,6 +2060,7 @@ static struct omap_hwmod_irq_info omap44xx_hsi_irqs[] = {
{ .name = "mpu_p1", .irq = 67 + OMAP44XX_IRQ_GIC_START },
{ .name = "mpu_p2", .irq = 68 + OMAP44XX_IRQ_GIC_START },
{ .name = "mpu_dma", .irq = 71 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
/* hsi master ports */
@@ -2071,6 +2074,7 @@ static struct omap_hwmod_addr_space omap44xx_hsi_addrs[] = {
.pa_end = 0x4a05bfff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> hsi */
@@ -2079,7 +2083,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__hsi = {
.slave = &omap44xx_hsi_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_hsi_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_hsi_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2092,9 +2095,8 @@ static struct omap_hwmod omap44xx_hsi_hwmod = {
.name = "hsi",
.class = &omap44xx_hsi_hwmod_class,
.mpu_irqs = omap44xx_hsi_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_hsi_irqs),
.main_clk = "hsi_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
},
@@ -2131,11 +2133,13 @@ static struct omap_hwmod_class omap44xx_i2c_hwmod_class = {
static struct omap_hwmod omap44xx_i2c1_hwmod;
static struct omap_hwmod_irq_info omap44xx_i2c1_irqs[] = {
{ .irq = 56 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_i2c1_sdma_reqs[] = {
{ .name = "tx", .dma_req = 26 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 27 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_i2c1_addrs[] = {
@@ -2144,6 +2148,7 @@ static struct omap_hwmod_addr_space omap44xx_i2c1_addrs[] = {
.pa_end = 0x480700ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> i2c1 */
@@ -2152,7 +2157,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c1 = {
.slave = &omap44xx_i2c1_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_i2c1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_i2c1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2166,9 +2170,7 @@ static struct omap_hwmod omap44xx_i2c1_hwmod = {
.class = &omap44xx_i2c_hwmod_class,
.flags = HWMOD_INIT_NO_RESET,
.mpu_irqs = omap44xx_i2c1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_i2c1_irqs),
.sdma_reqs = omap44xx_i2c1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_i2c1_sdma_reqs),
.main_clk = "i2c1_fck",
.prcm = {
.omap4 = {
@@ -2184,11 +2186,13 @@ static struct omap_hwmod omap44xx_i2c1_hwmod = {
static struct omap_hwmod omap44xx_i2c2_hwmod;
static struct omap_hwmod_irq_info omap44xx_i2c2_irqs[] = {
{ .irq = 57 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_i2c2_sdma_reqs[] = {
{ .name = "tx", .dma_req = 28 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 29 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_i2c2_addrs[] = {
@@ -2197,6 +2201,7 @@ static struct omap_hwmod_addr_space omap44xx_i2c2_addrs[] = {
.pa_end = 0x480720ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> i2c2 */
@@ -2205,7 +2210,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c2 = {
.slave = &omap44xx_i2c2_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_i2c2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_i2c2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2219,9 +2223,7 @@ static struct omap_hwmod omap44xx_i2c2_hwmod = {
.class = &omap44xx_i2c_hwmod_class,
.flags = HWMOD_INIT_NO_RESET,
.mpu_irqs = omap44xx_i2c2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_i2c2_irqs),
.sdma_reqs = omap44xx_i2c2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_i2c2_sdma_reqs),
.main_clk = "i2c2_fck",
.prcm = {
.omap4 = {
@@ -2237,11 +2239,13 @@ static struct omap_hwmod omap44xx_i2c2_hwmod = {
static struct omap_hwmod omap44xx_i2c3_hwmod;
static struct omap_hwmod_irq_info omap44xx_i2c3_irqs[] = {
{ .irq = 61 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_i2c3_sdma_reqs[] = {
{ .name = "tx", .dma_req = 24 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 25 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_i2c3_addrs[] = {
@@ -2250,6 +2254,7 @@ static struct omap_hwmod_addr_space omap44xx_i2c3_addrs[] = {
.pa_end = 0x480600ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> i2c3 */
@@ -2258,7 +2263,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c3 = {
.slave = &omap44xx_i2c3_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_i2c3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_i2c3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2272,9 +2276,7 @@ static struct omap_hwmod omap44xx_i2c3_hwmod = {
.class = &omap44xx_i2c_hwmod_class,
.flags = HWMOD_INIT_NO_RESET,
.mpu_irqs = omap44xx_i2c3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_i2c3_irqs),
.sdma_reqs = omap44xx_i2c3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_i2c3_sdma_reqs),
.main_clk = "i2c3_fck",
.prcm = {
.omap4 = {
@@ -2290,11 +2292,13 @@ static struct omap_hwmod omap44xx_i2c3_hwmod = {
static struct omap_hwmod omap44xx_i2c4_hwmod;
static struct omap_hwmod_irq_info omap44xx_i2c4_irqs[] = {
{ .irq = 62 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_i2c4_sdma_reqs[] = {
{ .name = "tx", .dma_req = 123 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 124 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_i2c4_addrs[] = {
@@ -2303,6 +2307,7 @@ static struct omap_hwmod_addr_space omap44xx_i2c4_addrs[] = {
.pa_end = 0x483500ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> i2c4 */
@@ -2311,7 +2316,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c4 = {
.slave = &omap44xx_i2c4_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_i2c4_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_i2c4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2325,9 +2329,7 @@ static struct omap_hwmod omap44xx_i2c4_hwmod = {
.class = &omap44xx_i2c_hwmod_class,
.flags = HWMOD_INIT_NO_RESET,
.mpu_irqs = omap44xx_i2c4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_i2c4_irqs),
.sdma_reqs = omap44xx_i2c4_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_i2c4_sdma_reqs),
.main_clk = "i2c4_fck",
.prcm = {
.omap4 = {
@@ -2351,6 +2353,7 @@ static struct omap_hwmod_class omap44xx_ipu_hwmod_class = {
/* ipu */
static struct omap_hwmod_irq_info omap44xx_ipu_irqs[] = {
{ .irq = 100 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_rst_info omap44xx_ipu_c0_resets[] = {
@@ -2390,7 +2393,7 @@ static struct omap_hwmod omap44xx_ipu_c0_hwmod = {
.flags = HWMOD_INIT_NO_RESET,
.rst_lines = omap44xx_ipu_c0_resets,
.rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_c0_resets),
- .prcm = {
+ .prcm = {
.omap4 = {
.rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
},
@@ -2405,7 +2408,7 @@ static struct omap_hwmod omap44xx_ipu_c1_hwmod = {
.flags = HWMOD_INIT_NO_RESET,
.rst_lines = omap44xx_ipu_c1_resets,
.rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_c1_resets),
- .prcm = {
+ .prcm = {
.omap4 = {
.rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
},
@@ -2417,11 +2420,10 @@ static struct omap_hwmod omap44xx_ipu_hwmod = {
.name = "ipu",
.class = &omap44xx_ipu_hwmod_class,
.mpu_irqs = omap44xx_ipu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_ipu_irqs),
.rst_lines = omap44xx_ipu_resets,
.rst_lines_cnt = ARRAY_SIZE(omap44xx_ipu_resets),
.main_clk = "ipu_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
.rstctrl_reg = OMAP4430_RM_DUCATI_RSTCTRL,
@@ -2446,7 +2448,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART),
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
.sysc_fields = &omap_hwmod_sysc_type2,
};
@@ -2458,6 +2460,7 @@ static struct omap_hwmod_class omap44xx_iss_hwmod_class = {
/* iss */
static struct omap_hwmod_irq_info omap44xx_iss_irqs[] = {
{ .irq = 24 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_iss_sdma_reqs[] = {
@@ -2465,6 +2468,7 @@ static struct omap_hwmod_dma_info omap44xx_iss_sdma_reqs[] = {
{ .name = "2", .dma_req = 9 + OMAP44XX_DMA_REQ_START },
{ .name = "3", .dma_req = 11 + OMAP44XX_DMA_REQ_START },
{ .name = "4", .dma_req = 12 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
/* iss master ports */
@@ -2478,6 +2482,7 @@ static struct omap_hwmod_addr_space omap44xx_iss_addrs[] = {
.pa_end = 0x520000ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> iss */
@@ -2486,7 +2491,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
.slave = &omap44xx_iss_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_iss_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_iss_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2503,11 +2507,9 @@ static struct omap_hwmod omap44xx_iss_hwmod = {
.name = "iss",
.class = &omap44xx_iss_hwmod_class,
.mpu_irqs = omap44xx_iss_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_iss_irqs),
.sdma_reqs = omap44xx_iss_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_iss_sdma_reqs),
.main_clk = "iss_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
},
@@ -2535,6 +2537,7 @@ static struct omap_hwmod_irq_info omap44xx_iva_irqs[] = {
{ .name = "sync_1", .irq = 103 + OMAP44XX_IRQ_GIC_START },
{ .name = "sync_0", .irq = 104 + OMAP44XX_IRQ_GIC_START },
{ .name = "mailbox_0", .irq = 107 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_rst_info omap44xx_iva_resets[] = {
@@ -2561,6 +2564,7 @@ static struct omap_hwmod_addr_space omap44xx_iva_addrs[] = {
.pa_end = 0x5a07ffff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l3_main_2 -> iva */
@@ -2569,7 +2573,6 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iva = {
.slave = &omap44xx_iva_hwmod,
.clk = "l3_div_ck",
.addr = omap44xx_iva_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_iva_addrs),
.user = OCP_USER_MPU,
};
@@ -2613,7 +2616,6 @@ static struct omap_hwmod omap44xx_iva_hwmod = {
.name = "iva",
.class = &omap44xx_iva_hwmod_class,
.mpu_irqs = omap44xx_iva_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_iva_irqs),
.rst_lines = omap44xx_iva_resets,
.rst_lines_cnt = ARRAY_SIZE(omap44xx_iva_resets),
.main_clk = "iva_fck",
@@ -2656,6 +2658,7 @@ static struct omap_hwmod_class omap44xx_kbd_hwmod_class = {
static struct omap_hwmod omap44xx_kbd_hwmod;
static struct omap_hwmod_irq_info omap44xx_kbd_irqs[] = {
{ .irq = 120 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_kbd_addrs[] = {
@@ -2664,6 +2667,7 @@ static struct omap_hwmod_addr_space omap44xx_kbd_addrs[] = {
.pa_end = 0x4a31c07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_wkup -> kbd */
@@ -2672,7 +2676,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__kbd = {
.slave = &omap44xx_kbd_hwmod,
.clk = "l4_wkup_clk_mux_ck",
.addr = omap44xx_kbd_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_kbd_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2685,9 +2688,8 @@ static struct omap_hwmod omap44xx_kbd_hwmod = {
.name = "kbd",
.class = &omap44xx_kbd_hwmod_class,
.mpu_irqs = omap44xx_kbd_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_kbd_irqs),
.main_clk = "kbd_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
},
@@ -2721,6 +2723,7 @@ static struct omap_hwmod_class omap44xx_mailbox_hwmod_class = {
static struct omap_hwmod omap44xx_mailbox_hwmod;
static struct omap_hwmod_irq_info omap44xx_mailbox_irqs[] = {
{ .irq = 26 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mailbox_addrs[] = {
@@ -2729,6 +2732,7 @@ static struct omap_hwmod_addr_space omap44xx_mailbox_addrs[] = {
.pa_end = 0x4a0f41ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> mailbox */
@@ -2737,7 +2741,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__mailbox = {
.slave = &omap44xx_mailbox_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mailbox_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mailbox_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -2750,8 +2753,7 @@ static struct omap_hwmod omap44xx_mailbox_hwmod = {
.name = "mailbox",
.class = &omap44xx_mailbox_hwmod_class,
.mpu_irqs = omap44xx_mailbox_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mailbox_irqs),
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL,
},
@@ -2784,11 +2786,13 @@ static struct omap_hwmod_class omap44xx_mcbsp_hwmod_class = {
static struct omap_hwmod omap44xx_mcbsp1_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcbsp1_irqs[] = {
{ .irq = 17 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
{ .name = "tx", .dma_req = 32 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 33 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcbsp1_addrs[] = {
@@ -2798,6 +2802,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp1_addrs[] = {
.pa_end = 0x401220ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> mcbsp1 */
@@ -2806,7 +2811,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1 = {
.slave = &omap44xx_mcbsp1_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_mcbsp1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp1_addrs),
.user = OCP_USER_MPU,
};
@@ -2817,6 +2821,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp1_dma_addrs[] = {
.pa_end = 0x490220ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> mcbsp1 (dma) */
@@ -2825,7 +2830,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1_dma = {
.slave = &omap44xx_mcbsp1_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_mcbsp1_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp1_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -2839,9 +2843,7 @@ static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
.name = "mcbsp1",
.class = &omap44xx_mcbsp_hwmod_class,
.mpu_irqs = omap44xx_mcbsp1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcbsp1_irqs),
.sdma_reqs = omap44xx_mcbsp1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcbsp1_sdma_reqs),
.main_clk = "mcbsp1_fck",
.prcm = {
.omap4 = {
@@ -2857,11 +2859,13 @@ static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
static struct omap_hwmod omap44xx_mcbsp2_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcbsp2_irqs[] = {
{ .irq = 22 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
{ .name = "tx", .dma_req = 16 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 17 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcbsp2_addrs[] = {
@@ -2871,6 +2875,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp2_addrs[] = {
.pa_end = 0x401240ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> mcbsp2 */
@@ -2879,7 +2884,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2 = {
.slave = &omap44xx_mcbsp2_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_mcbsp2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp2_addrs),
.user = OCP_USER_MPU,
};
@@ -2890,6 +2894,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp2_dma_addrs[] = {
.pa_end = 0x490240ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> mcbsp2 (dma) */
@@ -2898,7 +2903,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2_dma = {
.slave = &omap44xx_mcbsp2_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_mcbsp2_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp2_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -2912,9 +2916,7 @@ static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
.name = "mcbsp2",
.class = &omap44xx_mcbsp_hwmod_class,
.mpu_irqs = omap44xx_mcbsp2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcbsp2_irqs),
.sdma_reqs = omap44xx_mcbsp2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcbsp2_sdma_reqs),
.main_clk = "mcbsp2_fck",
.prcm = {
.omap4 = {
@@ -2930,11 +2932,13 @@ static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
static struct omap_hwmod omap44xx_mcbsp3_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcbsp3_irqs[] = {
{ .irq = 23 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
{ .name = "tx", .dma_req = 18 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 19 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcbsp3_addrs[] = {
@@ -2944,6 +2948,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp3_addrs[] = {
.pa_end = 0x401260ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> mcbsp3 */
@@ -2952,7 +2957,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3 = {
.slave = &omap44xx_mcbsp3_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_mcbsp3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp3_addrs),
.user = OCP_USER_MPU,
};
@@ -2963,6 +2967,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp3_dma_addrs[] = {
.pa_end = 0x490260ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> mcbsp3 (dma) */
@@ -2971,7 +2976,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3_dma = {
.slave = &omap44xx_mcbsp3_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_mcbsp3_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp3_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -2985,9 +2989,7 @@ static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
.name = "mcbsp3",
.class = &omap44xx_mcbsp_hwmod_class,
.mpu_irqs = omap44xx_mcbsp3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcbsp3_irqs),
.sdma_reqs = omap44xx_mcbsp3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcbsp3_sdma_reqs),
.main_clk = "mcbsp3_fck",
.prcm = {
.omap4 = {
@@ -3003,11 +3005,13 @@ static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
static struct omap_hwmod omap44xx_mcbsp4_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcbsp4_irqs[] = {
{ .irq = 16 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
{ .name = "tx", .dma_req = 30 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 31 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcbsp4_addrs[] = {
@@ -3016,6 +3020,7 @@ static struct omap_hwmod_addr_space omap44xx_mcbsp4_addrs[] = {
.pa_end = 0x480960ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcbsp4 */
@@ -3024,7 +3029,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcbsp4 = {
.slave = &omap44xx_mcbsp4_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mcbsp4_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcbsp4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3037,9 +3041,7 @@ static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
.name = "mcbsp4",
.class = &omap44xx_mcbsp_hwmod_class,
.mpu_irqs = omap44xx_mcbsp4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcbsp4_irqs),
.sdma_reqs = omap44xx_mcbsp4_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcbsp4_sdma_reqs),
.main_clk = "mcbsp4_fck",
.prcm = {
.omap4 = {
@@ -3076,11 +3078,13 @@ static struct omap_hwmod_class omap44xx_mcpdm_hwmod_class = {
static struct omap_hwmod omap44xx_mcpdm_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcpdm_irqs[] = {
{ .irq = 112 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcpdm_sdma_reqs[] = {
{ .name = "up_link", .dma_req = 64 + OMAP44XX_DMA_REQ_START },
{ .name = "dn_link", .dma_req = 65 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcpdm_addrs[] = {
@@ -3089,6 +3093,7 @@ static struct omap_hwmod_addr_space omap44xx_mcpdm_addrs[] = {
.pa_end = 0x4013207f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> mcpdm */
@@ -3097,7 +3102,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm = {
.slave = &omap44xx_mcpdm_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_mcpdm_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcpdm_addrs),
.user = OCP_USER_MPU,
};
@@ -3107,6 +3111,7 @@ static struct omap_hwmod_addr_space omap44xx_mcpdm_dma_addrs[] = {
.pa_end = 0x4903207f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> mcpdm (dma) */
@@ -3115,7 +3120,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm_dma = {
.slave = &omap44xx_mcpdm_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_mcpdm_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcpdm_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -3129,11 +3133,9 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
.name = "mcpdm",
.class = &omap44xx_mcpdm_hwmod_class,
.mpu_irqs = omap44xx_mcpdm_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcpdm_irqs),
.sdma_reqs = omap44xx_mcpdm_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcpdm_sdma_reqs),
.main_clk = "mcpdm_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
},
@@ -3169,6 +3171,7 @@ static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = {
static struct omap_hwmod omap44xx_mcspi1_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcspi1_irqs[] = {
{ .irq = 65 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcspi1_sdma_reqs[] = {
@@ -3180,6 +3183,7 @@ static struct omap_hwmod_dma_info omap44xx_mcspi1_sdma_reqs[] = {
{ .name = "rx2", .dma_req = 39 + OMAP44XX_DMA_REQ_START },
{ .name = "tx3", .dma_req = 40 + OMAP44XX_DMA_REQ_START },
{ .name = "rx3", .dma_req = 41 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcspi1_addrs[] = {
@@ -3188,6 +3192,7 @@ static struct omap_hwmod_addr_space omap44xx_mcspi1_addrs[] = {
.pa_end = 0x480981ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcspi1 */
@@ -3196,7 +3201,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi1 = {
.slave = &omap44xx_mcspi1_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mcspi1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcspi1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3214,9 +3218,7 @@ static struct omap_hwmod omap44xx_mcspi1_hwmod = {
.name = "mcspi1",
.class = &omap44xx_mcspi_hwmod_class,
.mpu_irqs = omap44xx_mcspi1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcspi1_irqs),
.sdma_reqs = omap44xx_mcspi1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcspi1_sdma_reqs),
.main_clk = "mcspi1_fck",
.prcm = {
.omap4 = {
@@ -3233,6 +3235,7 @@ static struct omap_hwmod omap44xx_mcspi1_hwmod = {
static struct omap_hwmod omap44xx_mcspi2_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcspi2_irqs[] = {
{ .irq = 66 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcspi2_sdma_reqs[] = {
@@ -3240,6 +3243,7 @@ static struct omap_hwmod_dma_info omap44xx_mcspi2_sdma_reqs[] = {
{ .name = "rx0", .dma_req = 43 + OMAP44XX_DMA_REQ_START },
{ .name = "tx1", .dma_req = 44 + OMAP44XX_DMA_REQ_START },
{ .name = "rx1", .dma_req = 45 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcspi2_addrs[] = {
@@ -3248,6 +3252,7 @@ static struct omap_hwmod_addr_space omap44xx_mcspi2_addrs[] = {
.pa_end = 0x4809a1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcspi2 */
@@ -3256,7 +3261,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi2 = {
.slave = &omap44xx_mcspi2_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mcspi2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcspi2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3274,9 +3278,7 @@ static struct omap_hwmod omap44xx_mcspi2_hwmod = {
.name = "mcspi2",
.class = &omap44xx_mcspi_hwmod_class,
.mpu_irqs = omap44xx_mcspi2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcspi2_irqs),
.sdma_reqs = omap44xx_mcspi2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcspi2_sdma_reqs),
.main_clk = "mcspi2_fck",
.prcm = {
.omap4 = {
@@ -3293,6 +3295,7 @@ static struct omap_hwmod omap44xx_mcspi2_hwmod = {
static struct omap_hwmod omap44xx_mcspi3_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcspi3_irqs[] = {
{ .irq = 91 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcspi3_sdma_reqs[] = {
@@ -3300,6 +3303,7 @@ static struct omap_hwmod_dma_info omap44xx_mcspi3_sdma_reqs[] = {
{ .name = "rx0", .dma_req = 15 + OMAP44XX_DMA_REQ_START },
{ .name = "tx1", .dma_req = 22 + OMAP44XX_DMA_REQ_START },
{ .name = "rx1", .dma_req = 23 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcspi3_addrs[] = {
@@ -3308,6 +3312,7 @@ static struct omap_hwmod_addr_space omap44xx_mcspi3_addrs[] = {
.pa_end = 0x480b81ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcspi3 */
@@ -3316,7 +3321,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi3 = {
.slave = &omap44xx_mcspi3_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mcspi3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcspi3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3334,9 +3338,7 @@ static struct omap_hwmod omap44xx_mcspi3_hwmod = {
.name = "mcspi3",
.class = &omap44xx_mcspi_hwmod_class,
.mpu_irqs = omap44xx_mcspi3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcspi3_irqs),
.sdma_reqs = omap44xx_mcspi3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcspi3_sdma_reqs),
.main_clk = "mcspi3_fck",
.prcm = {
.omap4 = {
@@ -3353,11 +3355,13 @@ static struct omap_hwmod omap44xx_mcspi3_hwmod = {
static struct omap_hwmod omap44xx_mcspi4_hwmod;
static struct omap_hwmod_irq_info omap44xx_mcspi4_irqs[] = {
{ .irq = 48 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mcspi4_sdma_reqs[] = {
{ .name = "tx0", .dma_req = 69 + OMAP44XX_DMA_REQ_START },
{ .name = "rx0", .dma_req = 70 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mcspi4_addrs[] = {
@@ -3366,6 +3370,7 @@ static struct omap_hwmod_addr_space omap44xx_mcspi4_addrs[] = {
.pa_end = 0x480ba1ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mcspi4 */
@@ -3374,7 +3379,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi4 = {
.slave = &omap44xx_mcspi4_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mcspi4_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mcspi4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3392,9 +3396,7 @@ static struct omap_hwmod omap44xx_mcspi4_hwmod = {
.name = "mcspi4",
.class = &omap44xx_mcspi_hwmod_class,
.mpu_irqs = omap44xx_mcspi4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mcspi4_irqs),
.sdma_reqs = omap44xx_mcspi4_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mcspi4_sdma_reqs),
.main_clk = "mcspi4_fck",
.prcm = {
.omap4 = {
@@ -3420,7 +3422,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_mmc_sysc = {
SYSC_HAS_SOFTRESET),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART),
+ MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
.sysc_fields = &omap_hwmod_sysc_type2,
};
@@ -3430,14 +3432,15 @@ static struct omap_hwmod_class omap44xx_mmc_hwmod_class = {
};
/* mmc1 */
-
static struct omap_hwmod_irq_info omap44xx_mmc1_irqs[] = {
{ .irq = 83 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mmc1_sdma_reqs[] = {
{ .name = "tx", .dma_req = 60 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 61 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
/* mmc1 master ports */
@@ -3451,6 +3454,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc1_addrs[] = {
.pa_end = 0x4809c3ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mmc1 */
@@ -3459,7 +3463,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc1 = {
.slave = &omap44xx_mmc1_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mmc1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mmc1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3477,11 +3480,9 @@ static struct omap_hwmod omap44xx_mmc1_hwmod = {
.name = "mmc1",
.class = &omap44xx_mmc_hwmod_class,
.mpu_irqs = omap44xx_mmc1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc1_irqs),
.sdma_reqs = omap44xx_mmc1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc1_sdma_reqs),
.main_clk = "mmc1_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
},
@@ -3497,11 +3498,13 @@ static struct omap_hwmod omap44xx_mmc1_hwmod = {
/* mmc2 */
static struct omap_hwmod_irq_info omap44xx_mmc2_irqs[] = {
{ .irq = 86 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mmc2_sdma_reqs[] = {
{ .name = "tx", .dma_req = 46 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 47 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
/* mmc2 master ports */
@@ -3515,6 +3518,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc2_addrs[] = {
.pa_end = 0x480b43ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mmc2 */
@@ -3523,7 +3527,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc2 = {
.slave = &omap44xx_mmc2_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mmc2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mmc2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3536,11 +3539,9 @@ static struct omap_hwmod omap44xx_mmc2_hwmod = {
.name = "mmc2",
.class = &omap44xx_mmc_hwmod_class,
.mpu_irqs = omap44xx_mmc2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc2_irqs),
.sdma_reqs = omap44xx_mmc2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc2_sdma_reqs),
.main_clk = "mmc2_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
},
@@ -3556,11 +3557,13 @@ static struct omap_hwmod omap44xx_mmc2_hwmod = {
static struct omap_hwmod omap44xx_mmc3_hwmod;
static struct omap_hwmod_irq_info omap44xx_mmc3_irqs[] = {
{ .irq = 94 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mmc3_sdma_reqs[] = {
{ .name = "tx", .dma_req = 76 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 77 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mmc3_addrs[] = {
@@ -3569,6 +3572,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc3_addrs[] = {
.pa_end = 0x480ad3ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mmc3 */
@@ -3577,7 +3581,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc3 = {
.slave = &omap44xx_mmc3_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mmc3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mmc3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3590,11 +3593,9 @@ static struct omap_hwmod omap44xx_mmc3_hwmod = {
.name = "mmc3",
.class = &omap44xx_mmc_hwmod_class,
.mpu_irqs = omap44xx_mmc3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc3_irqs),
.sdma_reqs = omap44xx_mmc3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc3_sdma_reqs),
.main_clk = "mmc3_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
},
@@ -3608,11 +3609,13 @@ static struct omap_hwmod omap44xx_mmc3_hwmod = {
static struct omap_hwmod omap44xx_mmc4_hwmod;
static struct omap_hwmod_irq_info omap44xx_mmc4_irqs[] = {
{ .irq = 96 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mmc4_sdma_reqs[] = {
{ .name = "tx", .dma_req = 56 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 57 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mmc4_addrs[] = {
@@ -3621,6 +3624,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc4_addrs[] = {
.pa_end = 0x480d13ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mmc4 */
@@ -3629,7 +3633,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc4 = {
.slave = &omap44xx_mmc4_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mmc4_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mmc4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3642,11 +3645,10 @@ static struct omap_hwmod omap44xx_mmc4_hwmod = {
.name = "mmc4",
.class = &omap44xx_mmc_hwmod_class,
.mpu_irqs = omap44xx_mmc4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc4_irqs),
+
.sdma_reqs = omap44xx_mmc4_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc4_sdma_reqs),
.main_clk = "mmc4_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
},
@@ -3660,11 +3662,13 @@ static struct omap_hwmod omap44xx_mmc4_hwmod = {
static struct omap_hwmod omap44xx_mmc5_hwmod;
static struct omap_hwmod_irq_info omap44xx_mmc5_irqs[] = {
{ .irq = 59 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_mmc5_sdma_reqs[] = {
{ .name = "tx", .dma_req = 58 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 59 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_mmc5_addrs[] = {
@@ -3673,6 +3677,7 @@ static struct omap_hwmod_addr_space omap44xx_mmc5_addrs[] = {
.pa_end = 0x480d53ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> mmc5 */
@@ -3681,7 +3686,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc5 = {
.slave = &omap44xx_mmc5_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_mmc5_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_mmc5_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3694,11 +3698,9 @@ static struct omap_hwmod omap44xx_mmc5_hwmod = {
.name = "mmc5",
.class = &omap44xx_mmc_hwmod_class,
.mpu_irqs = omap44xx_mmc5_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mmc5_irqs),
.sdma_reqs = omap44xx_mmc5_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_mmc5_sdma_reqs),
.main_clk = "mmc5_fck",
- .prcm = {
+ .prcm = {
.omap4 = {
.clkctrl_reg = OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
},
@@ -3722,6 +3724,7 @@ static struct omap_hwmod_irq_info omap44xx_mpu_irqs[] = {
{ .name = "pl310", .irq = 0 + OMAP44XX_IRQ_GIC_START },
{ .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START },
{ .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
/* mpu master ports */
@@ -3734,9 +3737,8 @@ static struct omap_hwmod_ocp_if *omap44xx_mpu_masters[] = {
static struct omap_hwmod omap44xx_mpu_hwmod = {
.name = "mpu",
.class = &omap44xx_mpu_hwmod_class,
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
.mpu_irqs = omap44xx_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_mpu_irqs),
.main_clk = "dpll_mpu_m2_ck",
.prcm = {
.omap4 = {
@@ -3778,6 +3780,7 @@ static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = {
static struct omap_hwmod omap44xx_smartreflex_core_hwmod;
static struct omap_hwmod_irq_info omap44xx_smartreflex_core_irqs[] = {
{ .irq = 19 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = {
@@ -3786,6 +3789,7 @@ static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = {
.pa_end = 0x4a0dd03f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> smartreflex_core */
@@ -3794,7 +3798,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_core = {
.slave = &omap44xx_smartreflex_core_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_smartreflex_core_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3807,7 +3810,7 @@ static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
.name = "smartreflex_core",
.class = &omap44xx_smartreflex_hwmod_class,
.mpu_irqs = omap44xx_smartreflex_core_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_irqs),
+
.main_clk = "smartreflex_core_fck",
.vdd_name = "core",
.prcm = {
@@ -3824,6 +3827,7 @@ static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
static struct omap_hwmod omap44xx_smartreflex_iva_hwmod;
static struct omap_hwmod_irq_info omap44xx_smartreflex_iva_irqs[] = {
{ .irq = 102 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = {
@@ -3832,6 +3836,7 @@ static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = {
.pa_end = 0x4a0db03f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> smartreflex_iva */
@@ -3840,7 +3845,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_iva = {
.slave = &omap44xx_smartreflex_iva_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_smartreflex_iva_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3853,7 +3857,6 @@ static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
.name = "smartreflex_iva",
.class = &omap44xx_smartreflex_hwmod_class,
.mpu_irqs = omap44xx_smartreflex_iva_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_irqs),
.main_clk = "smartreflex_iva_fck",
.vdd_name = "iva",
.prcm = {
@@ -3870,6 +3873,7 @@ static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod;
static struct omap_hwmod_irq_info omap44xx_smartreflex_mpu_irqs[] = {
{ .irq = 18 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = {
@@ -3878,6 +3882,7 @@ static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = {
.pa_end = 0x4a0d903f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> smartreflex_mpu */
@@ -3886,7 +3891,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_mpu = {
.slave = &omap44xx_smartreflex_mpu_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_smartreflex_mpu_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3899,7 +3903,6 @@ static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
.name = "smartreflex_mpu",
.class = &omap44xx_smartreflex_hwmod_class,
.mpu_irqs = omap44xx_smartreflex_mpu_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_irqs),
.main_clk = "smartreflex_mpu_fck",
.vdd_name = "mpu",
.prcm = {
@@ -3943,6 +3946,7 @@ static struct omap_hwmod_addr_space omap44xx_spinlock_addrs[] = {
.pa_end = 0x4a0f6fff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> spinlock */
@@ -3951,7 +3955,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__spinlock = {
.slave = &omap44xx_spinlock_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_spinlock_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_spinlock_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4015,6 +4018,7 @@ static struct omap_hwmod_class omap44xx_timer_hwmod_class = {
static struct omap_hwmod omap44xx_timer1_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer1_irqs[] = {
{ .irq = 37 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer1_addrs[] = {
@@ -4023,6 +4027,7 @@ static struct omap_hwmod_addr_space omap44xx_timer1_addrs[] = {
.pa_end = 0x4a31807f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_wkup -> timer1 */
@@ -4031,7 +4036,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__timer1 = {
.slave = &omap44xx_timer1_hwmod,
.clk = "l4_wkup_clk_mux_ck",
.addr = omap44xx_timer1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4044,7 +4048,6 @@ static struct omap_hwmod omap44xx_timer1_hwmod = {
.name = "timer1",
.class = &omap44xx_timer_1ms_hwmod_class,
.mpu_irqs = omap44xx_timer1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer1_irqs),
.main_clk = "timer1_fck",
.prcm = {
.omap4 = {
@@ -4060,6 +4063,7 @@ static struct omap_hwmod omap44xx_timer1_hwmod = {
static struct omap_hwmod omap44xx_timer2_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer2_irqs[] = {
{ .irq = 38 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer2_addrs[] = {
@@ -4068,6 +4072,7 @@ static struct omap_hwmod_addr_space omap44xx_timer2_addrs[] = {
.pa_end = 0x4803207f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer2 */
@@ -4076,7 +4081,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer2 = {
.slave = &omap44xx_timer2_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_timer2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4089,7 +4093,6 @@ static struct omap_hwmod omap44xx_timer2_hwmod = {
.name = "timer2",
.class = &omap44xx_timer_1ms_hwmod_class,
.mpu_irqs = omap44xx_timer2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer2_irqs),
.main_clk = "timer2_fck",
.prcm = {
.omap4 = {
@@ -4105,6 +4108,7 @@ static struct omap_hwmod omap44xx_timer2_hwmod = {
static struct omap_hwmod omap44xx_timer3_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer3_irqs[] = {
{ .irq = 39 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer3_addrs[] = {
@@ -4113,6 +4117,7 @@ static struct omap_hwmod_addr_space omap44xx_timer3_addrs[] = {
.pa_end = 0x4803407f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer3 */
@@ -4121,7 +4126,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer3 = {
.slave = &omap44xx_timer3_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_timer3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4134,7 +4138,6 @@ static struct omap_hwmod omap44xx_timer3_hwmod = {
.name = "timer3",
.class = &omap44xx_timer_hwmod_class,
.mpu_irqs = omap44xx_timer3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer3_irqs),
.main_clk = "timer3_fck",
.prcm = {
.omap4 = {
@@ -4150,6 +4153,7 @@ static struct omap_hwmod omap44xx_timer3_hwmod = {
static struct omap_hwmod omap44xx_timer4_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer4_irqs[] = {
{ .irq = 40 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer4_addrs[] = {
@@ -4158,6 +4162,7 @@ static struct omap_hwmod_addr_space omap44xx_timer4_addrs[] = {
.pa_end = 0x4803607f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer4 */
@@ -4166,7 +4171,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer4 = {
.slave = &omap44xx_timer4_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_timer4_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4179,7 +4183,6 @@ static struct omap_hwmod omap44xx_timer4_hwmod = {
.name = "timer4",
.class = &omap44xx_timer_hwmod_class,
.mpu_irqs = omap44xx_timer4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer4_irqs),
.main_clk = "timer4_fck",
.prcm = {
.omap4 = {
@@ -4195,6 +4198,7 @@ static struct omap_hwmod omap44xx_timer4_hwmod = {
static struct omap_hwmod omap44xx_timer5_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer5_irqs[] = {
{ .irq = 41 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer5_addrs[] = {
@@ -4203,6 +4207,7 @@ static struct omap_hwmod_addr_space omap44xx_timer5_addrs[] = {
.pa_end = 0x4013807f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> timer5 */
@@ -4211,7 +4216,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5 = {
.slave = &omap44xx_timer5_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_timer5_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer5_addrs),
.user = OCP_USER_MPU,
};
@@ -4221,6 +4225,7 @@ static struct omap_hwmod_addr_space omap44xx_timer5_dma_addrs[] = {
.pa_end = 0x4903807f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> timer5 (dma) */
@@ -4229,7 +4234,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5_dma = {
.slave = &omap44xx_timer5_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_timer5_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer5_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -4243,7 +4247,6 @@ static struct omap_hwmod omap44xx_timer5_hwmod = {
.name = "timer5",
.class = &omap44xx_timer_hwmod_class,
.mpu_irqs = omap44xx_timer5_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer5_irqs),
.main_clk = "timer5_fck",
.prcm = {
.omap4 = {
@@ -4259,6 +4262,7 @@ static struct omap_hwmod omap44xx_timer5_hwmod = {
static struct omap_hwmod omap44xx_timer6_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer6_irqs[] = {
{ .irq = 42 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer6_addrs[] = {
@@ -4267,6 +4271,7 @@ static struct omap_hwmod_addr_space omap44xx_timer6_addrs[] = {
.pa_end = 0x4013a07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> timer6 */
@@ -4275,7 +4280,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6 = {
.slave = &omap44xx_timer6_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_timer6_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer6_addrs),
.user = OCP_USER_MPU,
};
@@ -4285,6 +4289,7 @@ static struct omap_hwmod_addr_space omap44xx_timer6_dma_addrs[] = {
.pa_end = 0x4903a07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> timer6 (dma) */
@@ -4293,7 +4298,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6_dma = {
.slave = &omap44xx_timer6_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_timer6_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer6_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -4307,7 +4311,7 @@ static struct omap_hwmod omap44xx_timer6_hwmod = {
.name = "timer6",
.class = &omap44xx_timer_hwmod_class,
.mpu_irqs = omap44xx_timer6_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer6_irqs),
+
.main_clk = "timer6_fck",
.prcm = {
.omap4 = {
@@ -4323,6 +4327,7 @@ static struct omap_hwmod omap44xx_timer6_hwmod = {
static struct omap_hwmod omap44xx_timer7_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer7_irqs[] = {
{ .irq = 43 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer7_addrs[] = {
@@ -4331,6 +4336,7 @@ static struct omap_hwmod_addr_space omap44xx_timer7_addrs[] = {
.pa_end = 0x4013c07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> timer7 */
@@ -4339,7 +4345,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7 = {
.slave = &omap44xx_timer7_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_timer7_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer7_addrs),
.user = OCP_USER_MPU,
};
@@ -4349,6 +4354,7 @@ static struct omap_hwmod_addr_space omap44xx_timer7_dma_addrs[] = {
.pa_end = 0x4903c07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> timer7 (dma) */
@@ -4357,7 +4363,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7_dma = {
.slave = &omap44xx_timer7_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_timer7_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer7_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -4371,7 +4376,6 @@ static struct omap_hwmod omap44xx_timer7_hwmod = {
.name = "timer7",
.class = &omap44xx_timer_hwmod_class,
.mpu_irqs = omap44xx_timer7_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer7_irqs),
.main_clk = "timer7_fck",
.prcm = {
.omap4 = {
@@ -4387,6 +4391,7 @@ static struct omap_hwmod omap44xx_timer7_hwmod = {
static struct omap_hwmod omap44xx_timer8_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer8_irqs[] = {
{ .irq = 44 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer8_addrs[] = {
@@ -4395,6 +4400,7 @@ static struct omap_hwmod_addr_space omap44xx_timer8_addrs[] = {
.pa_end = 0x4013e07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> timer8 */
@@ -4403,7 +4409,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8 = {
.slave = &omap44xx_timer8_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_timer8_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer8_addrs),
.user = OCP_USER_MPU,
};
@@ -4413,6 +4418,7 @@ static struct omap_hwmod_addr_space omap44xx_timer8_dma_addrs[] = {
.pa_end = 0x4903e07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> timer8 (dma) */
@@ -4421,7 +4427,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8_dma = {
.slave = &omap44xx_timer8_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_timer8_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer8_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -4435,7 +4440,6 @@ static struct omap_hwmod omap44xx_timer8_hwmod = {
.name = "timer8",
.class = &omap44xx_timer_hwmod_class,
.mpu_irqs = omap44xx_timer8_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer8_irqs),
.main_clk = "timer8_fck",
.prcm = {
.omap4 = {
@@ -4451,6 +4455,7 @@ static struct omap_hwmod omap44xx_timer8_hwmod = {
static struct omap_hwmod omap44xx_timer9_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer9_irqs[] = {
{ .irq = 45 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer9_addrs[] = {
@@ -4459,6 +4464,7 @@ static struct omap_hwmod_addr_space omap44xx_timer9_addrs[] = {
.pa_end = 0x4803e07f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer9 */
@@ -4467,7 +4473,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer9 = {
.slave = &omap44xx_timer9_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_timer9_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer9_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4480,7 +4485,6 @@ static struct omap_hwmod omap44xx_timer9_hwmod = {
.name = "timer9",
.class = &omap44xx_timer_hwmod_class,
.mpu_irqs = omap44xx_timer9_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer9_irqs),
.main_clk = "timer9_fck",
.prcm = {
.omap4 = {
@@ -4496,6 +4500,7 @@ static struct omap_hwmod omap44xx_timer9_hwmod = {
static struct omap_hwmod omap44xx_timer10_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer10_irqs[] = {
{ .irq = 46 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer10_addrs[] = {
@@ -4504,6 +4509,7 @@ static struct omap_hwmod_addr_space omap44xx_timer10_addrs[] = {
.pa_end = 0x4808607f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer10 */
@@ -4512,7 +4518,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer10 = {
.slave = &omap44xx_timer10_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_timer10_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer10_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4525,7 +4530,6 @@ static struct omap_hwmod omap44xx_timer10_hwmod = {
.name = "timer10",
.class = &omap44xx_timer_1ms_hwmod_class,
.mpu_irqs = omap44xx_timer10_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer10_irqs),
.main_clk = "timer10_fck",
.prcm = {
.omap4 = {
@@ -4541,6 +4545,7 @@ static struct omap_hwmod omap44xx_timer10_hwmod = {
static struct omap_hwmod omap44xx_timer11_hwmod;
static struct omap_hwmod_irq_info omap44xx_timer11_irqs[] = {
{ .irq = 47 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_timer11_addrs[] = {
@@ -4549,6 +4554,7 @@ static struct omap_hwmod_addr_space omap44xx_timer11_addrs[] = {
.pa_end = 0x4808807f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> timer11 */
@@ -4557,7 +4563,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer11 = {
.slave = &omap44xx_timer11_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_timer11_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_timer11_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4570,7 +4575,6 @@ static struct omap_hwmod omap44xx_timer11_hwmod = {
.name = "timer11",
.class = &omap44xx_timer_hwmod_class,
.mpu_irqs = omap44xx_timer11_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_timer11_irqs),
.main_clk = "timer11_fck",
.prcm = {
.omap4 = {
@@ -4608,11 +4612,13 @@ static struct omap_hwmod_class omap44xx_uart_hwmod_class = {
static struct omap_hwmod omap44xx_uart1_hwmod;
static struct omap_hwmod_irq_info omap44xx_uart1_irqs[] = {
{ .irq = 72 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_uart1_sdma_reqs[] = {
{ .name = "tx", .dma_req = 48 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 49 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_uart1_addrs[] = {
@@ -4621,6 +4627,7 @@ static struct omap_hwmod_addr_space omap44xx_uart1_addrs[] = {
.pa_end = 0x4806a0ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> uart1 */
@@ -4629,7 +4636,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart1 = {
.slave = &omap44xx_uart1_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_uart1_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_uart1_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4642,9 +4648,7 @@ static struct omap_hwmod omap44xx_uart1_hwmod = {
.name = "uart1",
.class = &omap44xx_uart_hwmod_class,
.mpu_irqs = omap44xx_uart1_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart1_irqs),
.sdma_reqs = omap44xx_uart1_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_uart1_sdma_reqs),
.main_clk = "uart1_fck",
.prcm = {
.omap4 = {
@@ -4660,11 +4664,13 @@ static struct omap_hwmod omap44xx_uart1_hwmod = {
static struct omap_hwmod omap44xx_uart2_hwmod;
static struct omap_hwmod_irq_info omap44xx_uart2_irqs[] = {
{ .irq = 73 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_uart2_sdma_reqs[] = {
{ .name = "tx", .dma_req = 50 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 51 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_uart2_addrs[] = {
@@ -4673,6 +4679,7 @@ static struct omap_hwmod_addr_space omap44xx_uart2_addrs[] = {
.pa_end = 0x4806c0ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> uart2 */
@@ -4681,7 +4688,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart2 = {
.slave = &omap44xx_uart2_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_uart2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_uart2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4694,9 +4700,7 @@ static struct omap_hwmod omap44xx_uart2_hwmod = {
.name = "uart2",
.class = &omap44xx_uart_hwmod_class,
.mpu_irqs = omap44xx_uart2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart2_irqs),
.sdma_reqs = omap44xx_uart2_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_uart2_sdma_reqs),
.main_clk = "uart2_fck",
.prcm = {
.omap4 = {
@@ -4712,11 +4716,13 @@ static struct omap_hwmod omap44xx_uart2_hwmod = {
static struct omap_hwmod omap44xx_uart3_hwmod;
static struct omap_hwmod_irq_info omap44xx_uart3_irqs[] = {
{ .irq = 74 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_uart3_sdma_reqs[] = {
{ .name = "tx", .dma_req = 52 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 53 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_uart3_addrs[] = {
@@ -4725,6 +4731,7 @@ static struct omap_hwmod_addr_space omap44xx_uart3_addrs[] = {
.pa_end = 0x480200ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> uart3 */
@@ -4733,7 +4740,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart3 = {
.slave = &omap44xx_uart3_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_uart3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_uart3_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4745,11 +4751,9 @@ static struct omap_hwmod_ocp_if *omap44xx_uart3_slaves[] = {
static struct omap_hwmod omap44xx_uart3_hwmod = {
.name = "uart3",
.class = &omap44xx_uart_hwmod_class,
- .flags = (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET),
+ .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
.mpu_irqs = omap44xx_uart3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart3_irqs),
.sdma_reqs = omap44xx_uart3_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_uart3_sdma_reqs),
.main_clk = "uart3_fck",
.prcm = {
.omap4 = {
@@ -4765,11 +4769,13 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
static struct omap_hwmod omap44xx_uart4_hwmod;
static struct omap_hwmod_irq_info omap44xx_uart4_irqs[] = {
{ .irq = 70 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info omap44xx_uart4_sdma_reqs[] = {
{ .name = "tx", .dma_req = 54 + OMAP44XX_DMA_REQ_START },
{ .name = "rx", .dma_req = 55 + OMAP44XX_DMA_REQ_START },
+ { .dma_req = -1 }
};
static struct omap_hwmod_addr_space omap44xx_uart4_addrs[] = {
@@ -4778,6 +4784,7 @@ static struct omap_hwmod_addr_space omap44xx_uart4_addrs[] = {
.pa_end = 0x4806e0ff,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_per -> uart4 */
@@ -4786,7 +4793,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart4 = {
.slave = &omap44xx_uart4_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_uart4_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_uart4_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4799,9 +4805,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
.name = "uart4",
.class = &omap44xx_uart_hwmod_class,
.mpu_irqs = omap44xx_uart4_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_uart4_irqs),
.sdma_reqs = omap44xx_uart4_sdma_reqs,
- .sdma_reqs_cnt = ARRAY_SIZE(omap44xx_uart4_sdma_reqs),
.main_clk = "uart4_fck",
.prcm = {
.omap4 = {
@@ -4832,14 +4836,15 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_otg_hs_sysc = {
};
static struct omap_hwmod_class omap44xx_usb_otg_hs_hwmod_class = {
- .name = "usb_otg_hs",
- .sysc = &omap44xx_usb_otg_hs_sysc,
+ .name = "usb_otg_hs",
+ .sysc = &omap44xx_usb_otg_hs_sysc,
};
/* usb_otg_hs */
static struct omap_hwmod_irq_info omap44xx_usb_otg_hs_irqs[] = {
{ .name = "mc", .irq = 92 + OMAP44XX_IRQ_GIC_START },
{ .name = "dma", .irq = 93 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
/* usb_otg_hs master ports */
@@ -4853,6 +4858,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_otg_hs_addrs[] = {
.pa_end = 0x4a0ab003,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_cfg -> usb_otg_hs */
@@ -4861,7 +4867,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_otg_hs = {
.slave = &omap44xx_usb_otg_hs_hwmod,
.clk = "l4_div_ck",
.addr = omap44xx_usb_otg_hs_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4879,7 +4884,6 @@ static struct omap_hwmod omap44xx_usb_otg_hs_hwmod = {
.class = &omap44xx_usb_otg_hs_hwmod_class,
.flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
.mpu_irqs = omap44xx_usb_otg_hs_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_irqs),
.main_clk = "usb_otg_hs_ick",
.prcm = {
.omap4 = {
@@ -4887,7 +4891,7 @@ static struct omap_hwmod omap44xx_usb_otg_hs_hwmod = {
},
},
.opt_clks = usb_otg_hs_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(usb_otg_hs_opt_clks),
+ .opt_clks_cnt = ARRAY_SIZE(usb_otg_hs_opt_clks),
.slaves = omap44xx_usb_otg_hs_slaves,
.slaves_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_slaves),
.masters = omap44xx_usb_otg_hs_masters,
@@ -4922,6 +4926,7 @@ static struct omap_hwmod_class omap44xx_wd_timer_hwmod_class = {
static struct omap_hwmod omap44xx_wd_timer2_hwmod;
static struct omap_hwmod_irq_info omap44xx_wd_timer2_irqs[] = {
{ .irq = 80 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = {
@@ -4930,6 +4935,7 @@ static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = {
.pa_end = 0x4a31407f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_wkup -> wd_timer2 */
@@ -4938,7 +4944,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__wd_timer2 = {
.slave = &omap44xx_wd_timer2_hwmod,
.clk = "l4_wkup_clk_mux_ck",
.addr = omap44xx_wd_timer2_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_wd_timer2_addrs),
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -4951,7 +4956,6 @@ static struct omap_hwmod omap44xx_wd_timer2_hwmod = {
.name = "wd_timer2",
.class = &omap44xx_wd_timer_hwmod_class,
.mpu_irqs = omap44xx_wd_timer2_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_wd_timer2_irqs),
.main_clk = "wd_timer2_fck",
.prcm = {
.omap4 = {
@@ -4967,6 +4971,7 @@ static struct omap_hwmod omap44xx_wd_timer2_hwmod = {
static struct omap_hwmod omap44xx_wd_timer3_hwmod;
static struct omap_hwmod_irq_info omap44xx_wd_timer3_irqs[] = {
{ .irq = 36 + OMAP44XX_IRQ_GIC_START },
+ { .irq = -1 }
};
static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = {
@@ -4975,6 +4980,7 @@ static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = {
.pa_end = 0x4013007f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> wd_timer3 */
@@ -4983,7 +4989,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3 = {
.slave = &omap44xx_wd_timer3_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_wd_timer3_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_wd_timer3_addrs),
.user = OCP_USER_MPU,
};
@@ -4993,6 +4998,7 @@ static struct omap_hwmod_addr_space omap44xx_wd_timer3_dma_addrs[] = {
.pa_end = 0x4903007f,
.flags = ADDR_TYPE_RT
},
+ { }
};
/* l4_abe -> wd_timer3 (dma) */
@@ -5001,7 +5007,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3_dma = {
.slave = &omap44xx_wd_timer3_hwmod,
.clk = "ocp_abe_iclk",
.addr = omap44xx_wd_timer3_dma_addrs,
- .addr_cnt = ARRAY_SIZE(omap44xx_wd_timer3_dma_addrs),
.user = OCP_USER_SDMA,
};
@@ -5015,7 +5020,6 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
.name = "wd_timer3",
.class = &omap44xx_wd_timer_hwmod_class,
.mpu_irqs = omap44xx_wd_timer3_irqs,
- .mpu_irqs_cnt = ARRAY_SIZE(omap44xx_wd_timer3_irqs),
.main_clk = "wd_timer3_fck",
.prcm = {
.omap4 = {
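
The hwmod data hunks above all apply one pattern: the explicit .addr_cnt, .mpu_irqs_cnt and .sdma_reqs_cnt fields are dropped, and each address, IRQ and DMA-request array instead ends in a terminator record ({ }, { .irq = -1 }, { .dma_req = -1 }). A minimal sketch of how a consumer can recover the length from such a sentinel-terminated IRQ array; the helper name count_mpu_irqs is illustrative only, not the hwmod core's actual function:

	/* Illustrative only: walk a { .irq = -1 } terminated array to count it */
	static int count_mpu_irqs(const struct omap_hwmod_irq_info *irqs)
	{
		int n = 0;

		if (!irqs)
			return 0;

		while (irqs[n].irq != -1)
			n++;

		return n;
	}
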
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
index 08a134243ecb..de832ebc93a9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
@@ -49,23 +49,3 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
.srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
};
-
-/*
- * omap_hwmod class data
- */
-
-struct omap_hwmod_class l3_hwmod_class = {
- .name = "l3"
-};
-
-struct omap_hwmod_class l4_hwmod_class = {
- .name = "l4"
-};
-
-struct omap_hwmod_class mpu_hwmod_class = {
- .name = "mpu"
-};
-
-struct omap_hwmod_class iva_hwmod_class = {
- .name = "iva"
-};
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index c34e98bf1242..39a7c37f4587 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -1,10 +1,10 @@
/*
* omap_hwmod_common_data.h - OMAP hwmod common macros and declarations
*
- * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2010-2011 Nokia Corporation
* Paul Walmsley
*
- * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
* Benoît Cousson
*
* This program is free software; you can redistribute it and/or modify
@@ -16,10 +16,99 @@
#include <plat/omap_hwmod.h>
+/* Common address space across OMAP2xxx */
+extern struct omap_hwmod_addr_space omap2xxx_uart1_addr_space[];
+extern struct omap_hwmod_addr_space omap2xxx_uart2_addr_space[];
+extern struct omap_hwmod_addr_space omap2xxx_uart3_addr_space[];
+extern struct omap_hwmod_addr_space omap2xxx_timer2_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer3_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer4_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer5_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer6_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer7_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer8_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer9_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_timer12_addrs[];
+extern struct omap_hwmod_addr_space omap2xxx_mcbsp2_addrs[];
+
+/* Common address space across OMAP2xxx/3xxx */
+extern struct omap_hwmod_addr_space omap2_i2c1_addr_space[];
+extern struct omap_hwmod_addr_space omap2_i2c2_addr_space[];
+extern struct omap_hwmod_addr_space omap2_dss_addrs[];
+extern struct omap_hwmod_addr_space omap2_dss_dispc_addrs[];
+extern struct omap_hwmod_addr_space omap2_dss_rfbi_addrs[];
+extern struct omap_hwmod_addr_space omap2_dss_venc_addrs[];
+extern struct omap_hwmod_addr_space omap2_timer10_addrs[];
+extern struct omap_hwmod_addr_space omap2_timer11_addrs[];
+extern struct omap_hwmod_addr_space omap2430_mmc1_addr_space[];
+extern struct omap_hwmod_addr_space omap2430_mmc2_addr_space[];
+extern struct omap_hwmod_addr_space omap2_mcspi1_addr_space[];
+extern struct omap_hwmod_addr_space omap2_mcspi2_addr_space[];
+extern struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[];
+extern struct omap_hwmod_addr_space omap2_dma_system_addrs[];
+extern struct omap_hwmod_addr_space omap2_mailbox_addrs[];
+extern struct omap_hwmod_addr_space omap2_mcbsp1_addrs[];
+
+/* Common IP block data across OMAP2xxx */
+extern struct omap_hwmod_irq_info omap2xxx_timer12_mpu_irqs[];
+extern struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[];
+
+/* Common IP block data */
+extern struct omap_hwmod_dma_info omap2_uart1_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_uart2_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_uart3_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_i2c1_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_i2c2_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_mcspi1_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_mcspi2_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_mcbsp1_sdma_reqs[];
+extern struct omap_hwmod_dma_info omap2_mcbsp2_sdma_reqs[];
+
+/* Common IP block data on OMAP2430/OMAP3 */
+extern struct omap_hwmod_dma_info omap2_mcbsp3_sdma_reqs[];
+
+/* Common IP block data across OMAP2/3 */
+extern struct omap_hwmod_irq_info omap2_timer1_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer2_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer3_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer4_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer5_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer6_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer7_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer8_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer9_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer10_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_timer11_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_uart1_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_uart2_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_dispc_irqs[];
+extern struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_gpio1_irqs[];
+extern struct omap_hwmod_irq_info omap2_gpio2_irqs[];
+extern struct omap_hwmod_irq_info omap2_gpio3_irqs[];
+extern struct omap_hwmod_irq_info omap2_gpio4_irqs[];
+extern struct omap_hwmod_irq_info omap2_dma_system_irqs[];
+extern struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[];
+extern struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[];
+
/* OMAP hwmod classes - forward declarations */
extern struct omap_hwmod_class l3_hwmod_class;
extern struct omap_hwmod_class l4_hwmod_class;
extern struct omap_hwmod_class mpu_hwmod_class;
extern struct omap_hwmod_class iva_hwmod_class;
+extern struct omap_hwmod_class omap2_uart_class;
+extern struct omap_hwmod_class omap2_dss_hwmod_class;
+extern struct omap_hwmod_class omap2_dispc_hwmod_class;
+extern struct omap_hwmod_class omap2_rfbi_hwmod_class;
+extern struct omap_hwmod_class omap2_venc_hwmod_class;
+
+extern struct omap_hwmod_class omap2xxx_timer_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_wd_timer_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_gpio_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_dma_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_mailbox_hwmod_class;
+extern struct omap_hwmod_class omap2xxx_mcspi_class;
#endif
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index e01da45c0537..4411163e012d 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -38,155 +38,12 @@
#include "prm2xxx_3xxx.h"
#include "pm.h"
-int omap2_pm_debug;
u32 enable_off_mode;
-u32 sleep_while_idle;
-u32 wakeup_timer_seconds;
-u32 wakeup_timer_milliseconds;
-
-#define DUMP_PRM_MOD_REG(mod, reg) \
- regs[reg_count].name = #mod "." #reg; \
- regs[reg_count++].val = omap2_prm_read_mod_reg(mod, reg)
-#define DUMP_CM_MOD_REG(mod, reg) \
- regs[reg_count].name = #mod "." #reg; \
- regs[reg_count++].val = omap2_cm_read_mod_reg(mod, reg)
-#define DUMP_PRM_REG(reg) \
- regs[reg_count].name = #reg; \
- regs[reg_count++].val = __raw_readl(reg)
-#define DUMP_CM_REG(reg) \
- regs[reg_count].name = #reg; \
- regs[reg_count++].val = __raw_readl(reg)
-#define DUMP_INTC_REG(reg, off) \
- regs[reg_count].name = #reg; \
- regs[reg_count++].val = \
- __raw_readl(OMAP2_L4_IO_ADDRESS(0x480fe000 + (off)))
-
-void omap2_pm_dump(int mode, int resume, unsigned int us)
-{
- struct reg {
- const char *name;
- u32 val;
- } regs[32];
- int reg_count = 0, i;
- const char *s1 = NULL, *s2 = NULL;
-
- if (!resume) {
-#if 0
- /* MPU */
- DUMP_PRM_MOD_REG(OCP_MOD, OMAP2_PRM_IRQENABLE_MPU_OFFSET);
- DUMP_CM_MOD_REG(MPU_MOD, OMAP2_CM_CLKSTCTRL);
- DUMP_PRM_MOD_REG(MPU_MOD, OMAP2_PM_PWSTCTRL);
- DUMP_PRM_MOD_REG(MPU_MOD, OMAP2_PM_PWSTST);
- DUMP_PRM_MOD_REG(MPU_MOD, PM_WKDEP);
-#endif
-#if 0
- /* INTC */
- DUMP_INTC_REG(INTC_MIR0, 0x0084);
- DUMP_INTC_REG(INTC_MIR1, 0x00a4);
- DUMP_INTC_REG(INTC_MIR2, 0x00c4);
-#endif
-#if 0
- DUMP_CM_MOD_REG(CORE_MOD, CM_FCLKEN1);
- if (cpu_is_omap24xx()) {
- DUMP_CM_MOD_REG(CORE_MOD, OMAP24XX_CM_FCLKEN2);
- DUMP_PRM_MOD_REG(OMAP24XX_GR_MOD,
- OMAP2_PRCM_CLKEMUL_CTRL_OFFSET);
- DUMP_PRM_MOD_REG(OMAP24XX_GR_MOD,
- OMAP2_PRCM_CLKSRC_CTRL_OFFSET);
- }
- DUMP_CM_MOD_REG(WKUP_MOD, CM_FCLKEN);
- DUMP_CM_MOD_REG(CORE_MOD, CM_ICLKEN1);
- DUMP_CM_MOD_REG(CORE_MOD, CM_ICLKEN2);
- DUMP_CM_MOD_REG(WKUP_MOD, CM_ICLKEN);
- DUMP_CM_MOD_REG(PLL_MOD, CM_CLKEN);
- DUMP_CM_MOD_REG(PLL_MOD, CM_AUTOIDLE);
- DUMP_PRM_MOD_REG(CORE_MOD, OMAP2_PM_PWSTST);
-#endif
-#if 0
- /* DSP */
- if (cpu_is_omap24xx()) {
- DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_FCLKEN);
- DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_ICLKEN);
- DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_IDLEST);
- DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_AUTOIDLE);
- DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_CLKSEL);
- DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_CM_CLKSTCTRL);
- DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_RM_RSTCTRL);
- DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_RM_RSTST);
- DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_PM_PWSTCTRL);
- DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_PM_PWSTST);
- }
-#endif
- } else {
- DUMP_PRM_MOD_REG(CORE_MOD, PM_WKST1);
- if (cpu_is_omap24xx())
- DUMP_PRM_MOD_REG(CORE_MOD, OMAP24XX_PM_WKST2);
- DUMP_PRM_MOD_REG(WKUP_MOD, PM_WKST);
- DUMP_PRM_MOD_REG(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
-#if 1
- DUMP_INTC_REG(INTC_PENDING_IRQ0, 0x0098);
- DUMP_INTC_REG(INTC_PENDING_IRQ1, 0x00b8);
- DUMP_INTC_REG(INTC_PENDING_IRQ2, 0x00d8);
-#endif
- }
-
- switch (mode) {
- case 0:
- s1 = "full";
- s2 = "retention";
- break;
- case 1:
- s1 = "MPU";
- s2 = "retention";
- break;
- case 2:
- s1 = "MPU";
- s2 = "idle";
- break;
- }
-
- if (!resume)
-#ifdef CONFIG_NO_HZ
- printk(KERN_INFO
- "--- Going to %s %s (next timer after %u ms)\n", s1, s2,
- jiffies_to_msecs(get_next_timer_interrupt(jiffies) -
- jiffies));
-#else
- printk(KERN_INFO "--- Going to %s %s\n", s1, s2);
-#endif
- else
- printk(KERN_INFO "--- Woke up (slept for %u.%03u ms)\n",
- us / 1000, us % 1000);
-
- for (i = 0; i < reg_count; i++)
- printk(KERN_INFO "%-20s: 0x%08x\n", regs[i].name, regs[i].val);
-}
-
-void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds)
-{
- u32 tick_rate, cycles;
-
- if (!seconds && !milliseconds)
- return;
-
- tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer_wakeup));
- cycles = tick_rate * seconds + tick_rate * milliseconds / 1000;
- omap_dm_timer_stop(gptimer_wakeup);
- omap_dm_timer_set_load_start(gptimer_wakeup, 0, 0xffffffff - cycles);
-
- pr_info("PM: Resume timer in %u.%03u secs"
- " (%d ticks at %d ticks/sec.)\n",
- seconds, milliseconds, cycles, tick_rate);
-}
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-static void pm_dbg_regset_store(u32 *ptr);
-
-static struct dentry *pm_dbg_dir;
-
static int pm_dbg_init_done;
static int pm_dbg_init(void);
@@ -196,160 +53,6 @@ enum {
DEBUG_FILE_TIMERS,
};
-struct pm_module_def {
- char name[8]; /* Name of the module */
- short type; /* CM or PRM */
- unsigned short offset;
- int low; /* First register address on this module */
- int high; /* Last register address on this module */
-};
-
-#define MOD_CM 0
-#define MOD_PRM 1
-
-static const struct pm_module_def *pm_dbg_reg_modules;
-static const struct pm_module_def omap3_pm_reg_modules[] = {
- { "IVA2", MOD_CM, OMAP3430_IVA2_MOD, 0, 0x4c },
- { "OCP", MOD_CM, OCP_MOD, 0, 0x10 },
- { "MPU", MOD_CM, MPU_MOD, 4, 0x4c },
- { "CORE", MOD_CM, CORE_MOD, 0, 0x4c },
- { "SGX", MOD_CM, OMAP3430ES2_SGX_MOD, 0, 0x4c },
- { "WKUP", MOD_CM, WKUP_MOD, 0, 0x40 },
- { "CCR", MOD_CM, PLL_MOD, 0, 0x70 },
- { "DSS", MOD_CM, OMAP3430_DSS_MOD, 0, 0x4c },
- { "CAM", MOD_CM, OMAP3430_CAM_MOD, 0, 0x4c },
- { "PER", MOD_CM, OMAP3430_PER_MOD, 0, 0x4c },
- { "EMU", MOD_CM, OMAP3430_EMU_MOD, 0x40, 0x54 },
- { "NEON", MOD_CM, OMAP3430_NEON_MOD, 0x20, 0x48 },
- { "USB", MOD_CM, OMAP3430ES2_USBHOST_MOD, 0, 0x4c },
-
- { "IVA2", MOD_PRM, OMAP3430_IVA2_MOD, 0x50, 0xfc },
- { "OCP", MOD_PRM, OCP_MOD, 4, 0x1c },
- { "MPU", MOD_PRM, MPU_MOD, 0x58, 0xe8 },
- { "CORE", MOD_PRM, CORE_MOD, 0x58, 0xf8 },
- { "SGX", MOD_PRM, OMAP3430ES2_SGX_MOD, 0x58, 0xe8 },
- { "WKUP", MOD_PRM, WKUP_MOD, 0xa0, 0xb0 },
- { "CCR", MOD_PRM, PLL_MOD, 0x40, 0x70 },
- { "DSS", MOD_PRM, OMAP3430_DSS_MOD, 0x58, 0xe8 },
- { "CAM", MOD_PRM, OMAP3430_CAM_MOD, 0x58, 0xe8 },
- { "PER", MOD_PRM, OMAP3430_PER_MOD, 0x58, 0xe8 },
- { "EMU", MOD_PRM, OMAP3430_EMU_MOD, 0x58, 0xe4 },
- { "GLBL", MOD_PRM, OMAP3430_GR_MOD, 0x20, 0xe4 },
- { "NEON", MOD_PRM, OMAP3430_NEON_MOD, 0x58, 0xe8 },
- { "USB", MOD_PRM, OMAP3430ES2_USBHOST_MOD, 0x58, 0xe8 },
- { "", 0, 0, 0, 0 },
-};
-
-#define PM_DBG_MAX_REG_SETS 4
-
-static void *pm_dbg_reg_set[PM_DBG_MAX_REG_SETS];
-
-static int pm_dbg_get_regset_size(void)
-{
- static int regset_size;
-
- if (regset_size == 0) {
- int i = 0;
-
- while (pm_dbg_reg_modules[i].name[0] != 0) {
- regset_size += pm_dbg_reg_modules[i].high +
- 4 - pm_dbg_reg_modules[i].low;
- i++;
- }
- }
- return regset_size;
-}
-
-static int pm_dbg_show_regs(struct seq_file *s, void *unused)
-{
- int i, j;
- unsigned long val;
- int reg_set = (int)s->private;
- u32 *ptr;
- void *store = NULL;
- int regs;
- int linefeed;
-
- if (reg_set == 0) {
- store = kmalloc(pm_dbg_get_regset_size(), GFP_KERNEL);
- ptr = store;
- pm_dbg_regset_store(ptr);
- } else {
- ptr = pm_dbg_reg_set[reg_set - 1];
- }
-
- i = 0;
-
- while (pm_dbg_reg_modules[i].name[0] != 0) {
- regs = 0;
- linefeed = 0;
- if (pm_dbg_reg_modules[i].type == MOD_CM)
- seq_printf(s, "MOD: CM_%s (%08x)\n",
- pm_dbg_reg_modules[i].name,
- (u32)(OMAP3430_CM_BASE +
- pm_dbg_reg_modules[i].offset));
- else
- seq_printf(s, "MOD: PRM_%s (%08x)\n",
- pm_dbg_reg_modules[i].name,
- (u32)(OMAP3430_PRM_BASE +
- pm_dbg_reg_modules[i].offset));
-
- for (j = pm_dbg_reg_modules[i].low;
- j <= pm_dbg_reg_modules[i].high; j += 4) {
- val = *(ptr++);
- if (val != 0) {
- regs++;
- if (linefeed) {
- seq_printf(s, "\n");
- linefeed = 0;
- }
- seq_printf(s, " %02x => %08lx", j, val);
- if (regs % 4 == 0)
- linefeed = 1;
- }
- }
- seq_printf(s, "\n");
- i++;
- }
-
- if (store != NULL)
- kfree(store);
-
- return 0;
-}
-
-static void pm_dbg_regset_store(u32 *ptr)
-{
- int i, j;
- u32 val;
-
- i = 0;
-
- while (pm_dbg_reg_modules[i].name[0] != 0) {
- for (j = pm_dbg_reg_modules[i].low;
- j <= pm_dbg_reg_modules[i].high; j += 4) {
- if (pm_dbg_reg_modules[i].type == MOD_CM)
- val = omap2_cm_read_mod_reg(
- pm_dbg_reg_modules[i].offset, j);
- else
- val = omap2_prm_read_mod_reg(
- pm_dbg_reg_modules[i].offset, j);
- *(ptr++) = val;
- }
- i++;
- }
-}
-
-int pm_dbg_regset_save(int reg_set)
-{
- if (pm_dbg_reg_set[reg_set-1] == NULL)
- return -EINVAL;
-
- pm_dbg_regset_store(pm_dbg_reg_set[reg_set-1]);
-
- return 0;
-}
-
static const char pwrdm_state_names[][PWRDM_MAX_PWRSTS] = {
"OFF",
"RET",
@@ -469,11 +172,6 @@ static int pm_dbg_open(struct inode *inode, struct file *file)
};
}
-static int pm_dbg_reg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pm_dbg_show_regs, inode->i_private);
-}
-
static const struct file_operations debug_fops = {
.open = pm_dbg_open,
.read = seq_read,
@@ -481,40 +179,6 @@ static const struct file_operations debug_fops = {
.release = single_release,
};
-static const struct file_operations debug_reg_fops = {
- .open = pm_dbg_reg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-int pm_dbg_regset_init(int reg_set)
-{
- char name[2];
-
- if (!pm_dbg_init_done)
- pm_dbg_init();
-
- if (reg_set < 1 || reg_set > PM_DBG_MAX_REG_SETS ||
- pm_dbg_reg_set[reg_set-1] != NULL)
- return -EINVAL;
-
- pm_dbg_reg_set[reg_set-1] =
- kmalloc(pm_dbg_get_regset_size(), GFP_KERNEL);
-
- if (pm_dbg_reg_set[reg_set-1] == NULL)
- return -ENOMEM;
-
- if (pm_dbg_dir != NULL) {
- sprintf(name, "%d", reg_set);
-
- (void) debugfs_create_file(name, S_IRUGO,
- pm_dbg_dir, (void *)reg_set, &debug_reg_fops);
- }
-
- return 0;
-}
-
static int pwrdm_suspend_get(void *data, u64 *val)
{
int ret = -EINVAL;
@@ -576,9 +240,6 @@ static int option_set(void *data, u64 val)
{
u32 *option = data;
- if (option == &wakeup_timer_milliseconds && val >= 1000)
- return -EINVAL;
-
*option = val;
if (option == &enable_off_mode) {
@@ -595,22 +256,13 @@ static int option_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
-static int pm_dbg_init(void)
+static int __init pm_dbg_init(void)
{
- int i;
struct dentry *d;
- char name[2];
if (pm_dbg_init_done)
return 0;
- if (cpu_is_omap34xx())
- pm_dbg_reg_modules = omap3_pm_reg_modules;
- else {
- printk(KERN_ERR "%s: only OMAP3 supported\n", __func__);
- return -ENODEV;
- }
-
d = debugfs_create_dir("pm_debug", NULL);
if (IS_ERR(d))
return PTR_ERR(d);
@@ -622,30 +274,8 @@ static int pm_dbg_init(void)
pwrdm_for_each(pwrdms_setup, (void *)d);
- pm_dbg_dir = debugfs_create_dir("registers", d);
- if (IS_ERR(pm_dbg_dir))
- return PTR_ERR(pm_dbg_dir);
-
- (void) debugfs_create_file("current", S_IRUGO,
- pm_dbg_dir, (void *)0, &debug_reg_fops);
-
- for (i = 0; i < PM_DBG_MAX_REG_SETS; i++)
- if (pm_dbg_reg_set[i] != NULL) {
- sprintf(name, "%d", i+1);
- (void) debugfs_create_file(name, S_IRUGO,
- pm_dbg_dir, (void *)(i+1), &debug_reg_fops);
-
- }
-
(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
&enable_off_mode, &pm_dbg_option_fops);
- (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
- &sleep_while_idle, &pm_dbg_option_fops);
- (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
- &wakeup_timer_seconds, &pm_dbg_option_fops);
- (void) debugfs_create_file("wakeup_timer_milliseconds",
- S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
- &pm_dbg_option_fops);
pm_dbg_init_done = 1;
return 0;
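
What survives in pm-debug.c is the plain debugfs option plumbing: a u32 backing variable, option_get/option_set, DEFINE_SIMPLE_ATTRIBUTE, and one debugfs_create_file() call per option. A self-contained sketch of that same pattern, with hypothetical names (example_option, example_dbg_init) standing in for the enable_off_mode file created above:

	#include <linux/debugfs.h>
	#include <linux/err.h>
	#include <linux/fs.h>

	static u32 example_option;	/* hypothetical option, analogous to enable_off_mode */

	static int example_get(void *data, u64 *val)
	{
		*val = *(u32 *)data;
		return 0;
	}

	static int example_set(void *data, u64 val)
	{
		*(u32 *)data = val;
		return 0;
	}

	DEFINE_SIMPLE_ATTRIBUTE(example_option_fops, example_get, example_set, "%llu\n");

	static int __init example_dbg_init(void)
	{
		struct dentry *d = debugfs_create_dir("example_pm_debug", NULL);

		if (IS_ERR(d))
			return PTR_ERR(d);

		(void) debugfs_create_file("example_option", S_IRUGO | S_IWUSR, d,
					   &example_option, &example_option_fops);
		return 0;
	}
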
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 45bcfce77352..4e166add2f35 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -60,46 +60,40 @@ inline void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
extern int omap3_pm_get_suspend_state(struct powerdomain *pwrdm);
extern int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state);
-extern u32 wakeup_timer_seconds;
-extern u32 wakeup_timer_milliseconds;
-extern struct omap_dm_timer *gptimer_wakeup;
-
#ifdef CONFIG_PM_DEBUG
-extern void omap2_pm_dump(int mode, int resume, unsigned int us);
-extern void omap2_pm_wakeup_on_timer(u32 seconds, u32 milliseconds);
-extern int omap2_pm_debug;
extern u32 enable_off_mode;
-extern u32 sleep_while_idle;
#else
-#define omap2_pm_dump(mode, resume, us) do {} while (0);
-#define omap2_pm_wakeup_on_timer(seconds, milliseconds) do {} while (0);
-#define omap2_pm_debug 0
#define enable_off_mode 0
-#define sleep_while_idle 0
#endif
#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
-extern int pm_dbg_regset_save(int reg_set);
-extern int pm_dbg_regset_init(int reg_set);
#else
#define pm_dbg_update_time(pwrdm, prev) do {} while (0);
-#define pm_dbg_regset_save(reg_set) do {} while (0);
-#define pm_dbg_regset_init(reg_set) do {} while (0);
#endif /* CONFIG_PM_DEBUG */
+/* 24xx */
extern void omap24xx_idle_loop_suspend(void);
+extern unsigned int omap24xx_idle_loop_suspend_sz;
extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl,
void __iomem *sdrc_power);
-extern void omap34xx_cpu_suspend(u32 *addr, int save_state);
-extern int save_secure_ram_context(u32 *addr);
-extern void omap3_save_scratchpad_contents(void);
+extern unsigned int omap24xx_cpu_suspend_sz;
-extern unsigned int omap24xx_idle_loop_suspend_sz;
+/* 3xxx */
+extern void omap34xx_cpu_suspend(int save_state);
+
+/* omap3_do_wfi function pointer and size, for copy to SRAM */
+extern void omap3_do_wfi(void);
+extern unsigned int omap3_do_wfi_sz;
+/* ... and its pointer from SRAM after copy */
+extern void (*omap3_do_wfi_sram)(void);
+
+/* save_secure_ram_context function pointer and size, for copy to SRAM */
+extern int save_secure_ram_context(u32 *addr);
extern unsigned int save_secure_ram_context_sz;
-extern unsigned int omap24xx_cpu_suspend_sz;
-extern unsigned int omap34xx_cpu_suspend_sz;
+
+extern void omap3_save_scratchpad_contents(void);
#define PM_RTA_ERRATUM_i608 (1 << 0)
#define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1)
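
The pm.h hunk replaces the single omap34xx_cpu_suspend/omap34xx_cpu_suspend_sz pair with per-routine exports: each function that has to run from SRAM comes with a _sz size word emitted by the assembly, plus a pointer that later receives the address of the SRAM copy made by omap_sram_push(). A sketch of that convention with placeholder names (do_thing*, push_do_thing are not real kernel symbols):

	extern void do_thing(void);		/* SDRAM copy, defined in assembly */
	extern unsigned int do_thing_sz;	/* size word: .word . - do_thing */

	static void (*do_thing_sram)(void);	/* entry point of the copy in SRAM */

	static void push_do_thing(void)
	{
		/* omap_sram_push() copies the code to SRAM and returns its new address */
		do_thing_sram = omap_sram_push(do_thing, do_thing_sz);
	}
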
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index df3ded6fe194..bf089e743ed9 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -53,6 +53,8 @@
#include "powerdomain.h"
#include "clockdomain.h"
+static int omap2_pm_debug;
+
#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state = PM_SUSPEND_ON;
static inline bool is_suspending(void)
@@ -123,7 +125,6 @@ static void omap2_enter_full_retention(void)
omap2_gpio_prepare_for_idle(0);
if (omap2_pm_debug) {
- omap2_pm_dump(0, 0, 0);
getnstimeofday(&ts_preidle);
}
@@ -160,7 +161,6 @@ no_sleep:
getnstimeofday(&ts_postidle);
ts_idle = timespec_sub(ts_postidle, ts_preidle);
tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
- omap2_pm_dump(0, 1, tmp);
}
omap2_gpio_resume_after_idle();
@@ -247,7 +247,6 @@ static void omap2_enter_mpu_retention(void)
}
if (omap2_pm_debug) {
- omap2_pm_dump(only_idle ? 2 : 1, 0, 0);
getnstimeofday(&ts_preidle);
}
@@ -259,7 +258,6 @@ static void omap2_enter_mpu_retention(void)
getnstimeofday(&ts_postidle);
ts_idle = timespec_sub(ts_postidle, ts_preidle);
tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
- omap2_pm_dump(only_idle ? 2 : 1, 1, tmp);
}
}
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c155c9d1c82c..7255d9bce868 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -31,6 +31,8 @@
#include <linux/console.h>
#include <trace/events/power.h>
+#include <asm/suspend.h>
+
#include <plat/sram.h>
#include "clockdomain.h"
#include "powerdomain.h"
@@ -40,8 +42,6 @@
#include <plat/gpmc.h>
#include <plat/dma.h>
-#include <asm/tlbflush.h>
-
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
@@ -64,11 +64,6 @@ static inline bool is_suspending(void)
}
#endif
-/* Scratchpad offsets */
-#define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4
-#define OMAP343X_TABLE_VALUE_OFFSET 0xc0
-#define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8
-
/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;
@@ -83,9 +78,8 @@ struct power_state {
static LIST_HEAD(pwrst_list);
-static void (*_omap_sram_idle)(u32 *addr, int save_state);
-
static int (*_omap_save_secure_sram)(u32 *addr);
+void (*omap3_do_wfi_sram)(void);
static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
@@ -312,28 +306,25 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Function to restore the table entry that was modified for enabling MMU */
-static void restore_table_entry(void)
+static void omap34xx_save_context(u32 *save)
{
- void __iomem *scratchpad_address;
- u32 previous_value, control_reg_value;
- u32 *address;
+ u32 val;
- scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
+ /* Read Auxiliary Control Register */
+ asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
+ *save++ = 1;
+ *save++ = val;
- /* Get address of entry that was modified */
- address = (u32 *)__raw_readl(scratchpad_address +
- OMAP343X_TABLE_ADDRESS_OFFSET);
- /* Get the previous value which needs to be restored */
- previous_value = __raw_readl(scratchpad_address +
- OMAP343X_TABLE_VALUE_OFFSET);
- address = __va(address);
- *address = previous_value;
- flush_tlb_all();
- control_reg_value = __raw_readl(scratchpad_address
- + OMAP343X_CONTROL_REG_VALUE_OFFSET);
- /* This will enable caches and prediction */
- set_cr(control_reg_value);
+ /* Read L2 AUX ctrl register */
+ asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
+ *save++ = 1;
+ *save++ = val;
+}
+
+static int omap34xx_do_sram_idle(unsigned long save_state)
+{
+ omap34xx_cpu_suspend(save_state);
+ return 0;
}
void omap_sram_idle(void)
@@ -352,9 +343,6 @@ void omap_sram_idle(void)
int core_prev_state, per_prev_state;
u32 sdrc_pwr = 0;
- if (!_omap_sram_idle)
- return;
-
pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
pwrdm_clear_all_prev_pwrst(neon_pwrdm);
pwrdm_clear_all_prev_pwrst(core_pwrdm);
@@ -432,12 +420,16 @@ void omap_sram_idle(void)
sdrc_pwr = sdrc_read_reg(SDRC_POWER);
/*
- * omap3_arm_context is the location where ARM registers
- * get saved. The restore path then reads from this
- * location and restores them back.
+ * omap3_arm_context is the location where some ARM context
+ * gets saved. The rest is placed on the stack, and restored
+ * from there before resuming.
*/
- _omap_sram_idle(omap3_arm_context, save_state);
- cpu_init();
+ if (save_state)
+ omap34xx_save_context(omap3_arm_context);
+ if (save_state == 1 || save_state == 3)
+ cpu_suspend(save_state, omap34xx_do_sram_idle);
+ else
+ omap34xx_do_sram_idle(save_state);
/* Restore normal SDRC POWER settings */
if (omap_rev() >= OMAP3430_REV_ES3_0 &&
@@ -445,10 +437,6 @@ void omap_sram_idle(void)
core_next_state == PWRDM_POWER_OFF)
sdrc_write_reg(sdrc_pwr, SDRC_POWER);
- /* Restore table entry modified during MMU restoration */
- if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
- restore_table_entry();
-
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
@@ -497,8 +485,6 @@ console_still_active:
int omap3_can_sleep(void)
{
- if (!sleep_while_idle)
- return 0;
if (!omap_uart_can_sleep())
return 0;
return 1;
@@ -534,10 +520,6 @@ static int omap3_pm_suspend(void)
struct power_state *pwrst;
int state, ret = 0;
- if (wakeup_timer_seconds || wakeup_timer_milliseconds)
- omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
- wakeup_timer_milliseconds);
-
/* Read current next_pwrsts */
list_for_each_entry(pwrst, &pwrst_list, node)
pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
@@ -852,10 +834,17 @@ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
return 0;
}
+/*
+ * Push functions to SRAM
+ *
+ * The minimum set of functions is pushed to SRAM for execution:
+ * - omap3_do_wfi for erratum i581 WA,
+ * - save_secure_ram_context for security extensions.
+ */
void omap_push_sram_idle(void)
{
- _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
- omap34xx_cpu_suspend_sz);
+ omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
+
if (omap_type() != OMAP2_DEVICE_TYPE_GP)
_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
save_secure_ram_context_sz);
@@ -920,7 +909,6 @@ static int __init omap3_pm_init(void)
per_clkdm = clkdm_lookup("per_clkdm");
core_clkdm = clkdm_lookup("core_clkdm");
- omap_push_sram_idle();
#ifdef CONFIG_SUSPEND
suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */
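
With the private _omap_sram_idle pointer gone, pm34xx.c saves the small amount of ARM context itself (omap34xx_save_context) and enters low power through the generic ARM cpu_suspend() with a finisher callback, as the omap_sram_idle() hunk above shows. A condensed sketch of that calling convention; the prototype is assumed to be roughly int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) as declared by <asm/suspend.h> in this kernel generation:

	#include <asm/suspend.h>

	/*
	 * Finisher: called by cpu_suspend() after the core state has been
	 * saved. If the power-down does not happen it simply returns; if
	 * context is lost, wake-up resumes through the state cpu_suspend()
	 * saved beforehand.
	 */
	static int omap34xx_do_sram_idle(unsigned long save_state)
	{
		omap34xx_cpu_suspend(save_state);
		return 0;
	}

	/* in omap_sram_idle(): only the context-loss cases go through cpu_suspend() */
	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);
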
diff --git a/arch/arm/mach-omap2/powerdomains44xx_data.c b/arch/arm/mach-omap2/powerdomains44xx_data.c
index c4222c7036a5..3a7e678fd5f1 100644
--- a/arch/arm/mach-omap2/powerdomains44xx_data.c
+++ b/arch/arm/mach-omap2/powerdomains44xx_data.c
@@ -53,7 +53,7 @@ static struct powerdomain core_44xx_pwrdm = {
[3] = PWRSTS_ON, /* ducati_l2ram */
[4] = PWRSTS_ON, /* ducati_unicache */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* gfx_44xx_pwrdm: 3D accelerator power domain */
@@ -70,7 +70,7 @@ static struct powerdomain gfx_44xx_pwrdm = {
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* gfx_mem */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* abe_44xx_pwrdm: Audio back end power domain */
@@ -90,7 +90,7 @@ static struct powerdomain abe_44xx_pwrdm = {
[0] = PWRSTS_ON, /* aessmem */
[1] = PWRSTS_ON, /* periphmem */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* dss_44xx_pwrdm: Display subsystem power domain */
@@ -108,7 +108,7 @@ static struct powerdomain dss_44xx_pwrdm = {
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* dss_mem */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* tesla_44xx_pwrdm: Tesla processor power domain */
@@ -130,7 +130,7 @@ static struct powerdomain tesla_44xx_pwrdm = {
[1] = PWRSTS_ON, /* tesla_l1 */
[2] = PWRSTS_ON, /* tesla_l2 */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* wkup_44xx_pwrdm: Wake-up power domain */
@@ -241,7 +241,7 @@ static struct powerdomain ivahd_44xx_pwrdm = {
[2] = PWRSTS_ON, /* tcm1_mem */
[3] = PWRSTS_ON, /* tcm2_mem */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* cam_44xx_pwrdm: Camera subsystem power domain */
@@ -258,7 +258,7 @@ static struct powerdomain cam_44xx_pwrdm = {
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* cam_mem */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* l3init_44xx_pwrdm: L3 initiator peripherals power domain */
@@ -276,7 +276,7 @@ static struct powerdomain l3init_44xx_pwrdm = {
.pwrsts_mem_on = {
[0] = PWRSTS_ON, /* l3init_bank1 */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/* l4per_44xx_pwrdm: Target peripherals power domain */
@@ -296,7 +296,7 @@ static struct powerdomain l4per_44xx_pwrdm = {
[0] = PWRSTS_ON, /* nonretained_bank */
[1] = PWRSTS_ON, /* retained_bank */
},
- .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
};
/*
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h
index d22d1b43bccd..8a6e250f04b5 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.h
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -31,7 +31,6 @@
OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE + (inst) + (reg))
/* PRCM_MPU instances */
-
#define OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_INST 0x0000
#define OMAP4430_PRCM_MPU_DEVICE_PRM_INST 0x0200
#define OMAP4430_PRCM_MPU_CPU0_INST 0x0400
@@ -52,46 +51,46 @@
*/
/* PRCM_MPU.OCP_SOCKET_PRCM register offsets */
-#define OMAP4_REVISION_PRCM_OFFSET 0x0000
-#define OMAP4430_REVISION_PRCM OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_INST, 0x0000)
+#define OMAP4_REVISION_PRCM_OFFSET 0x0000
+#define OMAP4430_REVISION_PRCM OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_INST, 0x0000)
/* PRCM_MPU.DEVICE_PRM register offsets */
-#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET 0x0000
-#define OMAP4430_PRCM_MPU_PRM_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0000)
-#define OMAP4_PRCM_MPU_PRM_PSCON_COUNT_OFFSET 0x0004
-#define OMAP4430_PRCM_MPU_PRM_PSCON_COUNT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0004)
+#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET 0x0000
+#define OMAP4430_PRCM_MPU_PRM_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0000)
+#define OMAP4_PRCM_MPU_PRM_PSCON_COUNT_OFFSET 0x0004
+#define OMAP4430_PRCM_MPU_PRM_PSCON_COUNT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0004)
/* PRCM_MPU.CPU0 register offsets */
-#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET 0x0000
-#define OMAP4430_PM_CPU0_PWRSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0000)
-#define OMAP4_PM_CPU0_PWRSTST_OFFSET 0x0004
-#define OMAP4430_PM_CPU0_PWRSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0004)
-#define OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET 0x0008
-#define OMAP4430_RM_CPU0_CPU0_CONTEXT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0008)
-#define OMAP4_RM_CPU0_CPU0_RSTCTRL_OFFSET 0x000c
-#define OMAP4430_RM_CPU0_CPU0_RSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x000c)
-#define OMAP4_RM_CPU0_CPU0_RSTST_OFFSET 0x0010
-#define OMAP4430_RM_CPU0_CPU0_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0010)
-#define OMAP4_CM_CPU0_CPU0_CLKCTRL_OFFSET 0x0014
-#define OMAP4430_CM_CPU0_CPU0_CLKCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0014)
-#define OMAP4_CM_CPU0_CLKSTCTRL_OFFSET 0x0018
-#define OMAP4430_CM_CPU0_CLKSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0018)
+#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET 0x0000
+#define OMAP4430_PM_CPU0_PWRSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0000)
+#define OMAP4_PM_CPU0_PWRSTST_OFFSET 0x0004
+#define OMAP4430_PM_CPU0_PWRSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0004)
+#define OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET 0x0008
+#define OMAP4430_RM_CPU0_CPU0_CONTEXT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0008)
+#define OMAP4_RM_CPU0_CPU0_RSTCTRL_OFFSET 0x000c
+#define OMAP4430_RM_CPU0_CPU0_RSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x000c)
+#define OMAP4_RM_CPU0_CPU0_RSTST_OFFSET 0x0010
+#define OMAP4430_RM_CPU0_CPU0_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0010)
+#define OMAP4_CM_CPU0_CPU0_CLKCTRL_OFFSET 0x0014
+#define OMAP4430_CM_CPU0_CPU0_CLKCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0014)
+#define OMAP4_CM_CPU0_CLKSTCTRL_OFFSET 0x0018
+#define OMAP4430_CM_CPU0_CLKSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0018)
/* PRCM_MPU.CPU1 register offsets */
-#define OMAP4_PM_CPU1_PWRSTCTRL_OFFSET 0x0000
-#define OMAP4430_PM_CPU1_PWRSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0000)
-#define OMAP4_PM_CPU1_PWRSTST_OFFSET 0x0004
-#define OMAP4430_PM_CPU1_PWRSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0004)
-#define OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET 0x0008
-#define OMAP4430_RM_CPU1_CPU1_CONTEXT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0008)
-#define OMAP4_RM_CPU1_CPU1_RSTCTRL_OFFSET 0x000c
-#define OMAP4430_RM_CPU1_CPU1_RSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x000c)
-#define OMAP4_RM_CPU1_CPU1_RSTST_OFFSET 0x0010
-#define OMAP4430_RM_CPU1_CPU1_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0010)
-#define OMAP4_CM_CPU1_CPU1_CLKCTRL_OFFSET 0x0014
-#define OMAP4430_CM_CPU1_CPU1_CLKCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0014)
-#define OMAP4_CM_CPU1_CLKSTCTRL_OFFSET 0x0018
-#define OMAP4430_CM_CPU1_CLKSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0018)
+#define OMAP4_PM_CPU1_PWRSTCTRL_OFFSET 0x0000
+#define OMAP4430_PM_CPU1_PWRSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0000)
+#define OMAP4_PM_CPU1_PWRSTST_OFFSET 0x0004
+#define OMAP4430_PM_CPU1_PWRSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0004)
+#define OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET 0x0008
+#define OMAP4430_RM_CPU1_CPU1_CONTEXT OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0008)
+#define OMAP4_RM_CPU1_CPU1_RSTCTRL_OFFSET 0x000c
+#define OMAP4430_RM_CPU1_CPU1_RSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x000c)
+#define OMAP4_RM_CPU1_CPU1_RSTST_OFFSET 0x0010
+#define OMAP4430_RM_CPU1_CPU1_RSTST OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0010)
+#define OMAP4_CM_CPU1_CPU1_CLKCTRL_OFFSET 0x0014
+#define OMAP4430_CM_CPU1_CPU1_CLKCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0014)
+#define OMAP4_CM_CPU1_CLKSTCTRL_OFFSET 0x0018
+#define OMAP4430_CM_CPU1_CLKSTCTRL OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0018)
/* Function prototypes */
# ifndef __ASSEMBLER__
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index 67a0d3feb3f6..6e53120fd6cb 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -31,7 +31,7 @@
#define OMAP4430_PRM_BASE 0x4a306000
#define OMAP44XX_PRM_REGADDR(inst, reg) \
- OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (inst) + (reg))
+ OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (inst) + (reg))
/* PRM instances */
@@ -46,30 +46,18 @@
#define OMAP4430_PRM_CAM_INST 0x1000
#define OMAP4430_PRM_DSS_INST 0x1100
#define OMAP4430_PRM_GFX_INST 0x1200
-#define OMAP4430_PRM_L3INIT_INST 0x1300
+#define OMAP4430_PRM_L3INIT_INST 0x1300
#define OMAP4430_PRM_L4PER_INST 0x1400
-#define OMAP4430_PRM_CEFUSE_INST 0x1600
+#define OMAP4430_PRM_CEFUSE_INST 0x1600
#define OMAP4430_PRM_WKUP_INST 0x1700
#define OMAP4430_PRM_WKUP_CM_INST 0x1800
#define OMAP4430_PRM_EMU_INST 0x1900
-#define OMAP4430_PRM_EMU_CM_INST 0x1a00
-#define OMAP4430_PRM_DEVICE_INST 0x1b00
+#define OMAP4430_PRM_EMU_CM_INST 0x1a00
+#define OMAP4430_PRM_DEVICE_INST 0x1b00
#define OMAP4430_PRM_INSTR_INST 0x1f00
/* PRM clockdomain register offsets (from instance start) */
-#define OMAP4430_PRM_MPU_MPU_CDOFFS 0x0000
-#define OMAP4430_PRM_TESLA_TESLA_CDOFFS 0x0000
-#define OMAP4430_PRM_ABE_ABE_CDOFFS 0x0000
-#define OMAP4430_PRM_CORE_CORE_CDOFFS 0x0000
-#define OMAP4430_PRM_IVAHD_IVAHD_CDOFFS 0x0000
-#define OMAP4430_PRM_CAM_CAM_CDOFFS 0x0000
-#define OMAP4430_PRM_DSS_DSS_CDOFFS 0x0000
-#define OMAP4430_PRM_GFX_GFX_CDOFFS 0x0000
-#define OMAP4430_PRM_L3INIT_L3INIT_CDOFFS 0x0000
-#define OMAP4430_PRM_L4PER_L4PER_CDOFFS 0x0000
-#define OMAP4430_PRM_CEFUSE_CEFUSE_CDOFFS 0x0000
#define OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS 0x0000
-#define OMAP4430_PRM_EMU_EMU_CDOFFS 0x0000
#define OMAP4430_PRM_EMU_CM_EMU_CDOFFS 0x0000
/* OMAP4 specific register offsets */
@@ -247,8 +235,8 @@
#define OMAP4430_RM_MEMIF_DLL_H_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0464)
#define OMAP4_RM_D2D_SAD2D_CONTEXT_OFFSET 0x0524
#define OMAP4430_RM_D2D_SAD2D_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0524)
-#define OMAP4_RM_D2D_INSTEM_ICR_CONTEXT_OFFSET 0x052c
-#define OMAP4430_RM_D2D_INSTEM_ICR_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x052c)
+#define OMAP4_RM_D2D_MODEM_ICR_CONTEXT_OFFSET 0x052c
+#define OMAP4430_RM_D2D_MODEM_ICR_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x052c)
#define OMAP4_RM_D2D_SAD2D_FW_CONTEXT_OFFSET 0x0534
#define OMAP4430_RM_D2D_SAD2D_FW_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0534)
#define OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET 0x0624
@@ -713,8 +701,8 @@
#define OMAP4430_PRM_VC_VAL_BYPASS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a0)
#define OMAP4_PRM_VC_CFG_CHANNEL_OFFSET 0x00a4
#define OMAP4430_PRM_VC_CFG_CHANNEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a4)
-#define OMAP4_PRM_VC_CFG_I2C_INSTE_OFFSET 0x00a8
-#define OMAP4430_PRM_VC_CFG_I2C_INSTE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a8)
+#define OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET 0x00a8
+#define OMAP4430_PRM_VC_CFG_I2C_MODE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a8)
#define OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET 0x00ac
#define OMAP4430_PRM_VC_CFG_I2C_CLK OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00ac)
#define OMAP4_PRM_SRAM_COUNT_OFFSET 0x00b0
@@ -751,8 +739,8 @@
#define OMAP4430_PRM_PHASE2A_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00ec)
#define OMAP4_PRM_PHASE2B_CNDP_OFFSET 0x00f0
#define OMAP4430_PRM_PHASE2B_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f0)
-#define OMAP4_PRM_INSTEM_IF_CTRL_OFFSET 0x00f4
-#define OMAP4430_PRM_INSTEM_IF_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f4)
+#define OMAP4_PRM_MODEM_IF_CTRL_OFFSET 0x00f4
+#define OMAP4430_PRM_MODEM_IF_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f4)
#define OMAP4_PRM_VC_ERRST_OFFSET 0x00f8
#define OMAP4430_PRM_VC_ERRST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f8)
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 63f10669571a..f2ea1bd1c691 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -74,46 +74,6 @@
* API functions
*/
-/*
- * The "get_*restore_pointer" functions are used to provide a
- * physical restore address where the ROM code jumps while waking
- * up from MPU OFF/OSWR state.
- * The restore pointer is stored into the scratchpad.
- */
-
- .text
-/* Function call to get the restore pointer for resume from OFF */
-ENTRY(get_restore_pointer)
- stmfd sp!, {lr} @ save registers on stack
- adr r0, restore
- ldmfd sp!, {pc} @ restore regs and return
-ENDPROC(get_restore_pointer)
- .align
-ENTRY(get_restore_pointer_sz)
- .word . - get_restore_pointer
-
- .text
-/* Function call to get the restore pointer for 3630 resume from OFF */
-ENTRY(get_omap3630_restore_pointer)
- stmfd sp!, {lr} @ save registers on stack
- adr r0, restore_3630
- ldmfd sp!, {pc} @ restore regs and return
-ENDPROC(get_omap3630_restore_pointer)
- .align
-ENTRY(get_omap3630_restore_pointer_sz)
- .word . - get_omap3630_restore_pointer
-
- .text
-/* Function call to get the restore pointer for ES3 to resume from OFF */
-ENTRY(get_es3_restore_pointer)
- stmfd sp!, {lr} @ save registers on stack
- adr r0, restore_es3
- ldmfd sp!, {pc} @ restore regs and return
-ENDPROC(get_es3_restore_pointer)
- .align
-ENTRY(get_es3_restore_pointer_sz)
- .word . - get_es3_restore_pointer
-
.text
/*
* L2 cache needs to be toggled for stable OFF mode functionality on 3630.
@@ -133,7 +93,7 @@ ENDPROC(enable_omap3630_toggle_l2_on_restore)
/* Function to call rom code to save secure ram context */
.align 3
ENTRY(save_secure_ram_context)
- stmfd sp!, {r1-r12, lr} @ save registers on stack
+ stmfd sp!, {r4 - r11, lr} @ save registers on stack
adr r3, api_params @ r3 points to parameters
str r0, [r3,#0x4] @ r0 has sdram address
ldr r12, high_mask
@@ -152,7 +112,7 @@ ENTRY(save_secure_ram_context)
nop
nop
nop
- ldmfd sp!, {r1-r12, pc}
+ ldmfd sp!, {r4 - r11, pc}
.align
sram_phy_addr_mask:
.word SRAM_BASE_P
@@ -179,69 +139,38 @@ ENTRY(save_secure_ram_context_sz)
*
*
* Notes:
- * - this code gets copied to internal SRAM at boot and after wake-up
- * from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
+ * - only the minimum set of functions gets copied to internal SRAM at boot
+ * and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
+ * pointers in SDRAM or SRAM are called depending on the desired low power
+ * target state.
* - when the OMAP wakes up it continues at different execution points
* depending on the low power mode (non-OFF vs OFF modes),
* cf. 'Resume path for xxx mode' comments.
*/
.align 3
ENTRY(omap34xx_cpu_suspend)
- stmfd sp!, {r0-r12, lr} @ save registers on stack
+ stmfd sp!, {r4 - r11, lr} @ save registers on stack
/*
- * r0 contains CPU context save/restore pointer in sdram
- * r1 contains information about saving context:
+ * r0 contains information about saving context:
* 0 - No context lost
* 1 - Only L1 and logic lost
* 2 - Only L2 lost (Even L1 is retained we clean it along with L2)
* 3 - Both L1 and L2 lost and logic lost
*/
- /* Directly jump to WFI is the context save is not required */
- cmp r1, #0x0
- beq omap3_do_wfi
+ /*
+ * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
+ * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
+ */
+ ldr r4, omap3_do_wfi_sram_addr
+ ldr r5, [r4]
+ cmp r0, #0x0 @ If no context save required,
+ bxeq r5 @ jump to the WFI code in SRAM
+
/* Otherwise fall through to the save context code */
save_context_wfi:
- mov r8, r0 @ Store SDRAM address in r8
- mrc p15, 0, r5, c1, c0, 1 @ Read Auxiliary Control Register
- mov r4, #0x1 @ Number of parameters for restore call
- stmia r8!, {r4-r5} @ Push parameters for restore call
- mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register
- stmia r8!, {r4-r5} @ Push parameters for restore call
-
- /* Check what that target sleep state is from r1 */
- cmp r1, #0x2 @ Only L2 lost, no need to save context
- beq clean_caches
-
-l1_logic_lost:
- mov r4, sp @ Store sp
- mrs r5, spsr @ Store spsr
- mov r6, lr @ Store lr
- stmia r8!, {r4-r6}
-
- mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
- mrc p15, 0, r5, c2, c0, 0 @ TTBR0
- mrc p15, 0, r6, c2, c0, 1 @ TTBR1
- mrc p15, 0, r7, c2, c0, 2 @ TTBCR
- stmia r8!, {r4-r7}
-
- mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
- mrc p15, 0, r5, c10, c2, 0 @ PRRR
- mrc p15, 0, r6, c10, c2, 1 @ NMRR
- stmia r8!,{r4-r6}
-
- mrc p15, 0, r4, c13, c0, 1 @ Context ID
- mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
- mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
- mrs r7, cpsr @ Store current cpsr
- stmia r8!, {r4-r7}
-
- mrc p15, 0, r4, c1, c0, 0 @ save control register
- stmia r8!, {r4}
-
-clean_caches:
/*
* jump out to kernel flush routine
 * - reusing that code is better
@@ -284,7 +213,32 @@ clean_caches:
THUMB( nop )
.arm
-omap3_do_wfi:
+ b omap3_do_wfi
+
+/*
+ * Local variables
+ */
+omap3_do_wfi_sram_addr:
+ .word omap3_do_wfi_sram
+kernel_flush:
+ .word v7_flush_dcache_all
+
+/* ===================================
+ * == WFI instruction => Enter idle ==
+ * ===================================
+ */
+
+/*
+ * Do WFI instruction
+ * Includes the resume path for non-OFF modes
+ *
+ * This code gets copied to internal SRAM and is accessible
+ * from both SDRAM and SRAM:
+ * - executed from SRAM for non-off modes (omap3_do_wfi_sram),
+ * - executed from SDRAM for OFF mode (omap3_do_wfi).
+ */
+ .align 3
+ENTRY(omap3_do_wfi)
ldr r4, sdrc_power @ read the SDRC_POWER register
ldr r5, [r4] @ read the contents of SDRC_POWER
orr r5, r5, #0x40 @ enable self refresh on idle req
@@ -316,8 +270,86 @@ omap3_do_wfi:
nop
nop
nop
- bl wait_sdrc_ok
+/*
+ * This function implements the erratum ID i581 WA:
+ * SDRC state restore before accessing the SDRAM
+ *
+ * Only used at return from non-OFF mode. For OFF
+ * mode the ROM code configures the SDRC and
+ * the DPLL before calling the restore code directly
+ * from DDR.
+ */
+
+/* Make sure SDRC accesses are ok */
+wait_sdrc_ok:
+
+/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
+ ldr r4, cm_idlest_ckgen
+wait_dpll3_lock:
+ ldr r5, [r4]
+ tst r5, #1
+ beq wait_dpll3_lock
+
+ ldr r4, cm_idlest1_core
+wait_sdrc_ready:
+ ldr r5, [r4]
+ tst r5, #0x2
+ bne wait_sdrc_ready
+ /* allow DLL powerdown upon hw idle req */
+ ldr r4, sdrc_power
+ ldr r5, [r4]
+ bic r5, r5, #0x40
+ str r5, [r4]
+
+/*
+ * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
+ * base instead.
+ * Be careful not to clobber r7 when maintaining this code.
+ */
+
+is_dll_in_lock_mode:
+ /* Is dll in lock mode? */
+ ldr r4, sdrc_dlla_ctrl
+ ldr r5, [r4]
+ tst r5, #0x4
+ bne exit_nonoff_modes @ Return if locked
+ /* wait till dll locks */
+ adr r7, kick_counter
+wait_dll_lock_timed:
+ ldr r4, wait_dll_lock_counter
+ add r4, r4, #1
+ str r4, [r7, #wait_dll_lock_counter - kick_counter]
+ ldr r4, sdrc_dlla_status
+ /* Wait 20uS for lock */
+ mov r6, #8
+wait_dll_lock:
+ subs r6, r6, #0x1
+ beq kick_dll
+ ldr r5, [r4]
+ and r5, r5, #0x4
+ cmp r5, #0x4
+ bne wait_dll_lock
+ b exit_nonoff_modes @ Return when locked
+
+ /* disable/reenable DLL if not locked */
+kick_dll:
+ ldr r4, sdrc_dlla_ctrl
+ ldr r5, [r4]
+ mov r6, r5
+ bic r6, #(1<<3) @ disable dll
+ str r6, [r4]
+ dsb
+ orr r6, r6, #(1<<3) @ enable dll
+ str r6, [r4]
+ dsb
+ ldr r4, kick_counter
+ add r4, r4, #1
+ str r4, [r7] @ kick_counter
+ b wait_dll_lock_timed
+
+exit_nonoff_modes:
+ /* Re-enable C-bit if needed */
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
@@ -329,7 +361,32 @@ omap3_do_wfi:
* == Exit point from non-OFF modes ==
* ===================================
*/
- ldmfd sp!, {r0-r12, pc} @ restore regs and return
+ ldmfd sp!, {r4 - r11, pc} @ restore regs and return
+
+/*
+ * Local variables
+ */
+sdrc_power:
+ .word SDRC_POWER_V
+cm_idlest1_core:
+ .word CM_IDLEST1_CORE_V
+cm_idlest_ckgen:
+ .word CM_IDLEST_CKGEN_V
+sdrc_dlla_status:
+ .word SDRC_DLLA_STATUS_V
+sdrc_dlla_ctrl:
+ .word SDRC_DLLA_CTRL_V
+ /*
+ * When exporting to userspace while the counters are in SRAM,
+ * these 2 words need to be at the end to facilitate retrieval!
+ */
+kick_counter:
+ .word 0
+wait_dll_lock_counter:
+ .word 0
+
+ENTRY(omap3_do_wfi_sz)
+ .word . - omap3_do_wfi
/*
@@ -346,13 +403,17 @@ omap3_do_wfi:
* restore_es3: applies to 34xx >= ES3.0
* restore_3630: applies to 36xx
* restore: common code for 3xxx
+ *
+ * Note: when back from CORE and MPU OFF mode we are running
+ * from SDRAM, with the MMU, caches and branch prediction disabled.
+ * Also the SRAM content has been cleared.
*/
-restore_es3:
+ENTRY(omap3_restore_es3)
ldr r5, pm_prepwstst_core_p
ldr r4, [r5]
and r4, r4, #0x3
cmp r4, #0x0 @ Check if previous power state of CORE is OFF
- bne restore
+ bne omap3_restore @ Fall through to OMAP3 common code
adr r0, es3_sdrc_fix
ldr r1, sram_base
ldr r2, es3_sdrc_fix_sz
@@ -364,35 +425,32 @@ copy_to_sram:
bne copy_to_sram
ldr r1, sram_base
blx r1
- b restore
+ b omap3_restore @ Fall through to OMAP3 common code
+ENDPROC(omap3_restore_es3)
-restore_3630:
+ENTRY(omap3_restore_3630)
ldr r1, pm_prepwstst_core_p
ldr r2, [r1]
and r2, r2, #0x3
cmp r2, #0x0 @ Check if previous power state of CORE is OFF
- bne restore
+ bne omap3_restore @ Fall through to OMAP3 common code
/* Disable RTA before giving control */
ldr r1, control_mem_rta
mov r2, #OMAP36XX_RTA_DISABLE
str r2, [r1]
+ENDPROC(omap3_restore_3630)
/* Fall through to common code for the remaining logic */
-restore:
+ENTRY(omap3_restore)
/*
- * Check what was the reason for mpu reset and store the reason in r9:
- * 0 - No context lost
- * 1 - Only L1 and logic lost
- * 2 - Only L2 lost - In this case, we wont be here
- * 3 - Both L1 and L2 lost
+ * Read the pwstctrl register to check the reason for mpu reset.
+ * This tells us what was lost.
*/
ldr r1, pm_pwstctrl_mpu
ldr r2, [r1]
and r2, r2, #0x3
cmp r2, #0x0 @ Check if target power state was OFF or RET
- moveq r9, #0x3 @ MPU OFF => L1 and L2 lost
- movne r9, #0x1 @ Only L1 and L2 lost => avoid L2 invalidation
bne logic_l1_restore
ldr r0, l2dis_3630
@@ -471,115 +529,39 @@ logic_l1_restore:
orr r1, r1, #2 @ re-enable L2 cache
mcr p15, 0, r1, c1, c0, 1
skipl2reen:
- mov r1, #0
- /*
- * Invalidate all instruction caches to PoU
- * and flush branch target cache
- */
- mcr p15, 0, r1, c7, c5, 0
- ldr r4, scratchpad_base
- ldr r3, [r4,#0xBC]
- adds r3, r3, #16
-
- ldmia r3!, {r4-r6}
- mov sp, r4 @ Restore sp
- msr spsr_cxsf, r5 @ Restore spsr
- mov lr, r6 @ Restore lr
-
- ldmia r3!, {r4-r7}
- mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
- mcr p15, 0, r5, c2, c0, 0 @ TTBR0
- mcr p15, 0, r6, c2, c0, 1 @ TTBR1
- mcr p15, 0, r7, c2, c0, 2 @ TTBCR
-
- ldmia r3!,{r4-r6}
- mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
- mcr p15, 0, r5, c10, c2, 0 @ PRRR
- mcr p15, 0, r6, c10, c2, 1 @ NMRR
-
-
- ldmia r3!,{r4-r7}
- mcr p15, 0, r4, c13, c0, 1 @ Context ID
- mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
- mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
- msr cpsr, r7 @ store cpsr
-
- /* Enabling MMU here */
- mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
- /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
- and r7, #0x7
- cmp r7, #0x0
- beq usettbr0
-ttbr_error:
- /*
- * More work needs to be done to support N[0:2] value other than 0
- * So looping here so that the error can be detected
- */
- b ttbr_error
-usettbr0:
- mrc p15, 0, r2, c2, c0, 0
- ldr r5, ttbrbit_mask
- and r2, r5
- mov r4, pc
- ldr r5, table_index_mask
- and r4, r5 @ r4 = 31 to 20 bits of pc
- /* Extract the value to be written to table entry */
- ldr r1, table_entry
- /* r1 has the value to be written to table entry*/
- add r1, r1, r4
- /* Getting the address of table entry to modify */
- lsr r4, #18
- /* r2 has the location which needs to be modified */
- add r2, r4
- /* Storing previous entry of location being modified */
- ldr r5, scratchpad_base
- ldr r4, [r2]
- str r4, [r5, #0xC0]
- /* Modify the table entry */
- str r1, [r2]
- /*
- * Storing address of entry being modified
- * - will be restored after enabling MMU
- */
- ldr r5, scratchpad_base
- str r2, [r5, #0xC4]
-
- mov r0, #0
- mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
- mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
- mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
- mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
- /*
- * Restore control register. This enables the MMU.
- * The caches and prediction are not enabled here, they
- * will be enabled after restoring the MMU table entry.
- */
- ldmia r3!, {r4}
- /* Store previous value of control register in scratchpad */
- str r4, [r5, #0xC8]
- ldr r2, cache_pred_disable_mask
- and r4, r2
- mcr p15, 0, r4, c1, c0, 0
- dsb
- isb
- ldr r0, =restoremmu_on
- bx r0
+ /* Now branch to the common CPU resume function */
+ b cpu_resume
+ENDPROC(omap3_restore)
+
+ .ltorg
/*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
+ * Local variables
*/
-restoremmu_on:
- ldmfd sp!, {r0-r12, pc} @ restore regs and return
-
+pm_prepwstst_core_p:
+ .word PM_PREPWSTST_CORE_P
+pm_pwstctrl_mpu:
+ .word PM_PWSTCTRL_MPU_P
+scratchpad_base:
+ .word SCRATCHPAD_BASE_P
+sram_base:
+ .word SRAM_BASE_P + 0x8000
+control_stat:
+ .word CONTROL_STAT
+control_mem_rta:
+ .word CONTROL_MEM_RTA_CTRL
+l2dis_3630:
+ .word 0
/*
* Internal functions
*/
-/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
+/*
+ * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
+ * Copied to and run from SRAM in order to reconfigure the SDRC parameters.
+ */
.text
.align 3
ENTRY(es3_sdrc_fix)
@@ -609,6 +591,9 @@ ENTRY(es3_sdrc_fix)
str r5, [r4] @ kick off refreshes
bx lr
+/*
+ * Local variables
+ */
.align
sdrc_syscfg:
.word SDRC_SYSCONFIG_P
@@ -627,128 +612,3 @@ sdrc_manual_1:
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
.word . - es3_sdrc_fix
-
-/*
- * This function implements the erratum ID i581 WA:
- * SDRC state restore before accessing the SDRAM
- *
- * Only used at return from non-OFF mode. For OFF
- * mode the ROM code configures the SDRC and
- * the DPLL before calling the restore code directly
- * from DDR.
- */
-
-/* Make sure SDRC accesses are ok */
-wait_sdrc_ok:
-
-/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
- ldr r4, cm_idlest_ckgen
-wait_dpll3_lock:
- ldr r5, [r4]
- tst r5, #1
- beq wait_dpll3_lock
-
- ldr r4, cm_idlest1_core
-wait_sdrc_ready:
- ldr r5, [r4]
- tst r5, #0x2
- bne wait_sdrc_ready
- /* allow DLL powerdown upon hw idle req */
- ldr r4, sdrc_power
- ldr r5, [r4]
- bic r5, r5, #0x40
- str r5, [r4]
-
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaing this code.
- */
-
-is_dll_in_lock_mode:
- /* Is dll in lock mode? */
- ldr r4, sdrc_dlla_ctrl
- ldr r5, [r4]
- tst r5, #0x4
- bxne lr @ Return if locked
- /* wait till dll locks */
- adr r7, kick_counter
-wait_dll_lock_timed:
- ldr r4, wait_dll_lock_counter
- add r4, r4, #1
- str r4, [r7, #wait_dll_lock_counter - kick_counter]
- ldr r4, sdrc_dlla_status
- /* Wait 20uS for lock */
- mov r6, #8
-wait_dll_lock:
- subs r6, r6, #0x1
- beq kick_dll
- ldr r5, [r4]
- and r5, r5, #0x4
- cmp r5, #0x4
- bne wait_dll_lock
- bx lr @ Return when locked
-
- /* disable/reenable DLL if not locked */
-kick_dll:
- ldr r4, sdrc_dlla_ctrl
- ldr r5, [r4]
- mov r6, r5
- bic r6, #(1<<3) @ disable dll
- str r6, [r4]
- dsb
- orr r6, r6, #(1<<3) @ enable dll
- str r6, [r4]
- dsb
- ldr r4, kick_counter
- add r4, r4, #1
- str r4, [r7] @ kick_counter
- b wait_dll_lock_timed
-
- .align
-cm_idlest1_core:
- .word CM_IDLEST1_CORE_V
-cm_idlest_ckgen:
- .word CM_IDLEST_CKGEN_V
-sdrc_dlla_status:
- .word SDRC_DLLA_STATUS_V
-sdrc_dlla_ctrl:
- .word SDRC_DLLA_CTRL_V
-pm_prepwstst_core_p:
- .word PM_PREPWSTST_CORE_P
-pm_pwstctrl_mpu:
- .word PM_PWSTCTRL_MPU_P
-scratchpad_base:
- .word SCRATCHPAD_BASE_P
-sram_base:
- .word SRAM_BASE_P + 0x8000
-sdrc_power:
- .word SDRC_POWER_V
-ttbrbit_mask:
- .word 0xFFFFC000
-table_index_mask:
- .word 0xFFF00000
-table_entry:
- .word 0x00000C02
-cache_pred_disable_mask:
- .word 0xFFFFE7FB
-control_stat:
- .word CONTROL_STAT
-control_mem_rta:
- .word CONTROL_MEM_RTA_CTRL
-kernel_flush:
- .word v7_flush_dcache_all
-l2dis_3630:
- .word 0
- /*
- * When exporting to userspace while the counters are in SRAM,
- * these 2 words need to be at the end to facilitate retrival!
- */
-kick_counter:
- .word 0
-wait_dll_lock_counter:
- .word 0
-ENDPROC(omap34xx_cpu_suspend)
-
-ENTRY(omap34xx_cpu_suspend_sz)
- .word . - omap34xx_cpu_suspend
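For context: the omap3_do_wfi/omap3_do_wfi_sz pair exported above is meant to be copied into SRAM by the PM code, which then records the SRAM address in the C variable behind omap3_do_wfi_sram_addr. A minimal sketch of that push step follows, assuming the omap_sram_push() helper from plat/sram.h and the omap_push_sram_idle() hook named in the comments; the declarations shown are illustrative, not part of this patch.

#include <plat/sram.h>

/* Sketch only: exact types and the call site are assumptions */
extern void omap3_do_wfi(void);
extern unsigned int omap3_do_wfi_sz;

void (*omap3_do_wfi_sram)(void);	/* target of omap3_do_wfi_sram_addr */

void omap_push_sram_idle(void)
{
	/* copy the WFI routine into SRAM and remember where it landed */
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
}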
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index fb7dc52394a8..2ce2fb7664bc 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -143,7 +143,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
sr_write_reg(sr_info, IRQSTATUS, status);
}
- if (sr_class->class_type == SR_CLASS2 && sr_class->notify)
+ if (sr_class->notify)
sr_class->notify(sr_info->voltdm, status);
return IRQ_HANDLED;
@@ -258,9 +258,7 @@ static int sr_late_init(struct omap_sr *sr_info)
struct resource *mem;
int ret = 0;
- if (sr_class->class_type == SR_CLASS2 &&
- sr_class->notify_flags && sr_info->irq) {
-
+ if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
name = kasprintf(GFP_KERNEL, "sr_%s", sr_info->voltdm->name);
if (name == NULL) {
ret = -ENOMEM;
@@ -270,6 +268,7 @@ static int sr_late_init(struct omap_sr *sr_info)
0, name, (void *)sr_info);
if (ret)
goto error;
+ disable_irq(sr_info->irq);
}
if (pdata && pdata->enable_on_init)
@@ -278,16 +277,16 @@ static int sr_late_init(struct omap_sr *sr_info)
return ret;
error:
- iounmap(sr_info->base);
- mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
- list_del(&sr_info->node);
- dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
- "interrupt handler. Smartreflex will"
- "not function as desired\n", __func__);
- kfree(name);
- kfree(sr_info);
- return ret;
+ iounmap(sr_info->base);
+ mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ list_del(&sr_info->node);
+ dev_err(&sr_info->pdev->dev, "%s: ERROR in registering "
+ "interrupt handler. Smartreflex will "
+ "not function as desired\n", __func__);
+ kfree(name);
+ kfree(sr_info);
+ return ret;
}
static void sr_v1_disable(struct omap_sr *sr)
@@ -808,10 +807,13 @@ static int omap_sr_autocomp_store(void *data, u64 val)
return -EINVAL;
}
- if (!val)
- sr_stop_vddautocomp(sr_info);
- else
- sr_start_vddautocomp(sr_info);
+ /* control enable/disable only if there is a delta in value */
+ if (sr_info->autocomp_active != val) {
+ if (!val)
+ sr_stop_vddautocomp(sr_info);
+ else
+ sr_start_vddautocomp(sr_info);
+ }
return 0;
}
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
deleted file mode 100644
index 3b9cf85f4bb9..000000000000
--- a/arch/arm/mach-omap2/timer-gp.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/timer-gp.c
- *
- * OMAP2 GP timer support.
- *
- * Copyright (C) 2009 Nokia Corporation
- *
- * Update to use new clocksource/clockevent layers
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- * Copyright (C) 2007 MontaVista Software, Inc.
- *
- * Original driver:
- * Copyright (C) 2005 Nokia Corporation
- * Author: Paul Mundt <paul.mundt@nokia.com>
- * Juha Yrjölä <juha.yrjola@nokia.com>
- * OMAP Dual-mode timer framework support by Timo Teras
- *
- * Some parts based off of TI's 24xx code:
- *
- * Copyright (C) 2004-2009 Texas Instruments, Inc.
- *
- * Roughly modelled after the OMAP1 MPU timer code.
- * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-
-#include <asm/mach/time.h>
-#include <plat/dmtimer.h>
-#include <asm/localtimer.h>
-#include <asm/sched_clock.h>
-#include <plat/common.h>
-#include <plat/omap_hwmod.h>
-
-#include "timer-gp.h"
-
-
-/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
-#define MAX_GPTIMER_ID 12
-
-static struct omap_dm_timer *gptimer;
-static struct clock_event_device clockevent_gpt;
-static u8 __initdata gptimer_id = 1;
-static u8 __initdata inited;
-struct omap_dm_timer *gptimer_wakeup;
-
-static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
-{
- struct omap_dm_timer *gpt = (struct omap_dm_timer *)dev_id;
- struct clock_event_device *evt = &clockevent_gpt;
-
- omap_dm_timer_write_status(gpt, OMAP_TIMER_INT_OVERFLOW);
-
- evt->event_handler(evt);
- return IRQ_HANDLED;
-}
-
-static struct irqaction omap2_gp_timer_irq = {
- .name = "gp timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .handler = omap2_gp_timer_interrupt,
-};
-
-static int omap2_gp_timer_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
-{
- omap_dm_timer_set_load_start(gptimer, 0, 0xffffffff - cycles);
-
- return 0;
-}
-
-static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- u32 period;
-
- omap_dm_timer_stop(gptimer);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- period = clk_get_rate(omap_dm_timer_get_fclk(gptimer)) / HZ;
- period -= 1;
- omap_dm_timer_set_load_start(gptimer, 1, 0xffffffff - period);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
-}
-
-static struct clock_event_device clockevent_gpt = {
- .name = "gp timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
- .set_next_event = omap2_gp_timer_set_next_event,
- .set_mode = omap2_gp_timer_set_mode,
-};
-
-/**
- * omap2_gp_clockevent_set_gptimer - set which GPTIMER is used for clockevents
- * @id: GPTIMER to use (1..MAX_GPTIMER_ID)
- *
- * Define the GPTIMER that the system should use for the tick timer.
- * Meant to be called from board-*.c files in the event that GPTIMER1, the
- * default, is unsuitable. Returns -EINVAL on error or 0 on success.
- */
-int __init omap2_gp_clockevent_set_gptimer(u8 id)
-{
- if (id < 1 || id > MAX_GPTIMER_ID)
- return -EINVAL;
-
- BUG_ON(inited);
-
- gptimer_id = id;
-
- return 0;
-}
-
-static void __init omap2_gp_clockevent_init(void)
-{
- u32 tick_rate;
- int src;
- char clockevent_hwmod_name[8]; /* 8 = sizeof("timerXX0") */
-
- inited = 1;
-
- sprintf(clockevent_hwmod_name, "timer%d", gptimer_id);
- omap_hwmod_setup_one(clockevent_hwmod_name);
-
- gptimer = omap_dm_timer_request_specific(gptimer_id);
- BUG_ON(gptimer == NULL);
- gptimer_wakeup = gptimer;
-
-#if defined(CONFIG_OMAP_32K_TIMER)
- src = OMAP_TIMER_SRC_32_KHZ;
-#else
- src = OMAP_TIMER_SRC_SYS_CLK;
- WARN(gptimer_id == 12, "WARNING: GPTIMER12 can only use the "
- "secure 32KiHz clock source\n");
-#endif
-
- if (gptimer_id != 12)
- WARN(IS_ERR_VALUE(omap_dm_timer_set_source(gptimer, src)),
- "timer-gp: omap_dm_timer_set_source() failed\n");
-
- tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gptimer));
-
- pr_info("OMAP clockevent source: GPTIMER%d at %u Hz\n",
- gptimer_id, tick_rate);
-
- omap2_gp_timer_irq.dev_id = (void *)gptimer;
- setup_irq(omap_dm_timer_get_irq(gptimer), &omap2_gp_timer_irq);
- omap_dm_timer_set_int_enable(gptimer, OMAP_TIMER_INT_OVERFLOW);
-
- clockevent_gpt.mult = div_sc(tick_rate, NSEC_PER_SEC,
- clockevent_gpt.shift);
- clockevent_gpt.max_delta_ns =
- clockevent_delta2ns(0xffffffff, &clockevent_gpt);
- clockevent_gpt.min_delta_ns =
- clockevent_delta2ns(3, &clockevent_gpt);
- /* Timer internal resynch latency. */
-
- clockevent_gpt.cpumask = cpumask_of(0);
- clockevents_register_device(&clockevent_gpt);
-}
-
-/* Clocksource code */
-
-#ifdef CONFIG_OMAP_32K_TIMER
-/*
- * When 32k-timer is enabled, don't use GPTimer for clocksource
- * instead, just leave default clocksource which uses the 32k
- * sync counter. See clocksource setup in plat-omap/counter_32k.c
- */
-
-static void __init omap2_gp_clocksource_init(void)
-{
- omap_init_clocksource_32k();
-}
-
-#else
-/*
- * clocksource
- */
-static DEFINE_CLOCK_DATA(cd);
-static struct omap_dm_timer *gpt_clocksource;
-static cycle_t clocksource_read_cycles(struct clocksource *cs)
-{
- return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource);
-}
-
-static struct clocksource clocksource_gpt = {
- .name = "gp timer",
- .rating = 300,
- .read = clocksource_read_cycles,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static void notrace dmtimer_update_sched_clock(void)
-{
- u32 cyc;
-
- cyc = omap_dm_timer_read_counter(gpt_clocksource);
-
- update_sched_clock(&cd, cyc, (u32)~0);
-}
-
-/* Setup free-running counter for clocksource */
-static void __init omap2_gp_clocksource_init(void)
-{
- static struct omap_dm_timer *gpt;
- u32 tick_rate;
- static char err1[] __initdata = KERN_ERR
- "%s: failed to request dm-timer\n";
- static char err2[] __initdata = KERN_ERR
- "%s: can't register clocksource!\n";
-
- gpt = omap_dm_timer_request();
- if (!gpt)
- printk(err1, clocksource_gpt.name);
- gpt_clocksource = gpt;
-
- omap_dm_timer_set_source(gpt, OMAP_TIMER_SRC_SYS_CLK);
- tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gpt));
-
- omap_dm_timer_set_load_start(gpt, 1, 0);
-
- init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
-
- if (clocksource_register_hz(&clocksource_gpt, tick_rate))
- printk(err2, clocksource_gpt.name);
-}
-#endif
-
-static void __init omap2_gp_timer_init(void)
-{
-#ifdef CONFIG_LOCAL_TIMERS
- if (cpu_is_omap44xx()) {
- twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_256);
- BUG_ON(!twd_base);
- }
-#endif
- omap_dm_timer_init();
-
- omap2_gp_clockevent_init();
- omap2_gp_clocksource_init();
-}
-
-struct sys_timer omap_timer = {
- .init = omap2_gp_timer_init,
-};
diff --git a/arch/arm/mach-omap2/timer-gp.h b/arch/arm/mach-omap2/timer-gp.h
deleted file mode 100644
index 5c1072c6783b..000000000000
--- a/arch/arm/mach-omap2/timer-gp.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * OMAP2/3 GPTIMER support.headers
- *
- * Copyright (C) 2009 Nokia Corporation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_TIMER_GP_H
-#define __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_TIMER_GP_H
-
-extern int __init omap2_gp_clockevent_set_gptimer(u8 id);
-
-#endif
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
new file mode 100644
index 000000000000..e9640728239b
--- /dev/null
+++ b/arch/arm/mach-omap2/timer.c
@@ -0,0 +1,342 @@
+/*
+ * linux/arch/arm/mach-omap2/timer.c
+ *
+ * OMAP2 GP timer support.
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Update to use new clocksource/clockevent layers
+ * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ *
+ * Original driver:
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ * Juha Yrjölä <juha.yrjola@nokia.com>
+ * OMAP Dual-mode timer framework support by Timo Teras
+ *
+ * Some parts based off of TI's 24xx code:
+ *
+ * Copyright (C) 2004-2009 Texas Instruments, Inc.
+ *
+ * Roughly modelled after the OMAP1 MPU timer code.
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+
+#include <asm/mach/time.h>
+#include <plat/dmtimer.h>
+#include <asm/localtimer.h>
+#include <asm/sched_clock.h>
+#include <plat/common.h>
+#include <plat/omap_hwmod.h>
+
+/* Parent clocks, eventually these will come from the clock framework */
+
+#define OMAP2_MPU_SOURCE "sys_ck"
+#define OMAP3_MPU_SOURCE OMAP2_MPU_SOURCE
+#define OMAP4_MPU_SOURCE "sys_clkin_ck"
+#define OMAP2_32K_SOURCE "func_32k_ck"
+#define OMAP3_32K_SOURCE "omap_32k_fck"
+#define OMAP4_32K_SOURCE "sys_32k_ck"
+
+#ifdef CONFIG_OMAP_32K_TIMER
+#define OMAP2_CLKEV_SOURCE OMAP2_32K_SOURCE
+#define OMAP3_CLKEV_SOURCE OMAP3_32K_SOURCE
+#define OMAP4_CLKEV_SOURCE OMAP4_32K_SOURCE
+#define OMAP3_SECURE_TIMER 12
+#else
+#define OMAP2_CLKEV_SOURCE OMAP2_MPU_SOURCE
+#define OMAP3_CLKEV_SOURCE OMAP3_MPU_SOURCE
+#define OMAP4_CLKEV_SOURCE OMAP4_MPU_SOURCE
+#define OMAP3_SECURE_TIMER 1
+#endif
+
+/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
+#define MAX_GPTIMER_ID 12
+
+u32 sys_timer_reserved;
+
+/* Clockevent code */
+
+static struct omap_dm_timer clkev;
+static struct clock_event_device clockevent_gpt;
+
+static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &clockevent_gpt;
+
+ __omap_dm_timer_write_status(clkev.io_base, OMAP_TIMER_INT_OVERFLOW);
+
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction omap2_gp_timer_irq = {
+ .name = "gp timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = omap2_gp_timer_interrupt,
+};
+
+static int omap2_gp_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ __omap_dm_timer_load_start(clkev.io_base, OMAP_TIMER_CTRL_ST,
+ 0xffffffff - cycles, 1);
+
+ return 0;
+}
+
+static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u32 period;
+
+ __omap_dm_timer_stop(clkev.io_base, 1, clkev.rate);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ period = clkev.rate / HZ;
+ period -= 1;
+ /* Looks like we need to first set the load value separately */
+ __omap_dm_timer_write(clkev.io_base, OMAP_TIMER_LOAD_REG,
+ 0xffffffff - period, 1);
+ __omap_dm_timer_load_start(clkev.io_base,
+ OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+ 0xffffffff - period, 1);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+static struct clock_event_device clockevent_gpt = {
+ .name = "gp timer",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
+ .set_next_event = omap2_gp_timer_set_next_event,
+ .set_mode = omap2_gp_timer_set_mode,
+};
+
+static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
+ int gptimer_id,
+ const char *fck_source)
+{
+ char name[10]; /* 10 = sizeof("gptXX_Xck0") */
+ struct omap_hwmod *oh;
+ size_t size;
+ int res = 0;
+
+ sprintf(name, "timer%d", gptimer_id);
+ omap_hwmod_setup_one(name);
+ oh = omap_hwmod_lookup(name);
+ if (!oh)
+ return -ENODEV;
+
+ timer->irq = oh->mpu_irqs[0].irq;
+ timer->phys_base = oh->slaves[0]->addr->pa_start;
+ size = oh->slaves[0]->addr->pa_end - timer->phys_base;
+
+ /* Static mapping, never released */
+ timer->io_base = ioremap(timer->phys_base, size);
+ if (!timer->io_base)
+ return -ENXIO;
+
+ /* After the dmtimer is using hwmod these clocks won't be needed */
+ sprintf(name, "gpt%d_fck", gptimer_id);
+ timer->fclk = clk_get(NULL, name);
+ if (IS_ERR(timer->fclk))
+ return -ENODEV;
+
+ sprintf(name, "gpt%d_ick", gptimer_id);
+ timer->iclk = clk_get(NULL, name);
+ if (IS_ERR(timer->iclk)) {
+ clk_put(timer->fclk);
+ return -ENODEV;
+ }
+
+ omap_hwmod_enable(oh);
+
+ sys_timer_reserved |= (1 << (gptimer_id - 1));
+
+ if (gptimer_id != 12) {
+ struct clk *src;
+
+ src = clk_get(NULL, fck_source);
+ if (IS_ERR(src)) {
+ res = -EINVAL;
+ } else {
+ res = __omap_dm_timer_set_source(timer->fclk, src);
+ if (IS_ERR_VALUE(res))
+ pr_warning("%s: timer%i cannot set source\n",
+ __func__, gptimer_id);
+ clk_put(src);
+ }
+ }
+ __omap_dm_timer_reset(timer->io_base, 1, 1);
+ timer->posted = 1;
+
+ timer->rate = clk_get_rate(timer->fclk);
+
+ timer->reserved = 1;
+
+ return res;
+}
+
+static void __init omap2_gp_clockevent_init(int gptimer_id,
+ const char *fck_source)
+{
+ int res;
+
+ res = omap_dm_timer_init_one(&clkev, gptimer_id, fck_source);
+ BUG_ON(res);
+
+ omap2_gp_timer_irq.dev_id = (void *)&clkev;
+ setup_irq(clkev.irq, &omap2_gp_timer_irq);
+
+ __omap_dm_timer_int_enable(clkev.io_base, OMAP_TIMER_INT_OVERFLOW);
+
+ clockevent_gpt.mult = div_sc(clkev.rate, NSEC_PER_SEC,
+ clockevent_gpt.shift);
+ clockevent_gpt.max_delta_ns =
+ clockevent_delta2ns(0xffffffff, &clockevent_gpt);
+ clockevent_gpt.min_delta_ns =
+ clockevent_delta2ns(3, &clockevent_gpt);
+ /* Timer internal resynch latency. */
+
+ clockevent_gpt.cpumask = cpumask_of(0);
+ clockevents_register_device(&clockevent_gpt);
+
+ pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n",
+ gptimer_id, clkev.rate);
+}
+
+/* Clocksource code */
+
+#ifdef CONFIG_OMAP_32K_TIMER
+/*
+ * When the 32k timer is enabled, don't use a GPTIMER for the clocksource;
+ * instead, just leave the default clocksource, which uses the 32k
+ * sync counter. See the clocksource setup in plat-omap/counter_32k.c
+ */
+
+static void __init omap2_gp_clocksource_init(int unused, const char *dummy)
+{
+ omap_init_clocksource_32k();
+}
+
+#else
+
+static struct omap_dm_timer clksrc;
+
+/*
+ * clocksource
+ */
+static DEFINE_CLOCK_DATA(cd);
+static cycle_t clocksource_read_cycles(struct clocksource *cs)
+{
+ return (cycle_t)__omap_dm_timer_read_counter(clksrc.io_base, 1);
+}
+
+static struct clocksource clocksource_gpt = {
+ .name = "gp timer",
+ .rating = 300,
+ .read = clocksource_read_cycles,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void notrace dmtimer_update_sched_clock(void)
+{
+ u32 cyc;
+
+ cyc = __omap_dm_timer_read_counter(clksrc.io_base, 1);
+
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+
+unsigned long long notrace sched_clock(void)
+{
+ u32 cyc = 0;
+
+ if (clksrc.reserved)
+ cyc = __omap_dm_timer_read_counter(clksrc.io_base, 1);
+
+ return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+/* Setup free-running counter for clocksource */
+static void __init omap2_gp_clocksource_init(int gptimer_id,
+ const char *fck_source)
+{
+ int res;
+
+ res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source);
+ BUG_ON(res);
+
+ pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
+ gptimer_id, clksrc.rate);
+
+ __omap_dm_timer_load_start(clksrc.io_base, OMAP_TIMER_CTRL_ST, 0, 1);
+ init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate);
+
+ if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
+ pr_err("Could not register clocksource %s\n",
+ clocksource_gpt.name);
+}
+#endif
+
+#define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src, \
+ clksrc_nr, clksrc_src) \
+static void __init omap##name##_timer_init(void) \
+{ \
+ omap2_gp_clockevent_init((clkev_nr), clkev_src); \
+ omap2_gp_clocksource_init((clksrc_nr), clksrc_src); \
+}
+
+#define OMAP_SYS_TIMER(name) \
+struct sys_timer omap##name##_timer = { \
+ .init = omap##name##_timer_init, \
+};
+
+#ifdef CONFIG_ARCH_OMAP2
+OMAP_SYS_TIMER_INIT(2, 1, OMAP2_CLKEV_SOURCE, 2, OMAP2_MPU_SOURCE)
+OMAP_SYS_TIMER(2)
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+OMAP_SYS_TIMER_INIT(3, 1, OMAP3_CLKEV_SOURCE, 2, OMAP3_MPU_SOURCE)
+OMAP_SYS_TIMER(3)
+OMAP_SYS_TIMER_INIT(3_secure, OMAP3_SECURE_TIMER, OMAP3_CLKEV_SOURCE,
+ 2, OMAP3_MPU_SOURCE)
+OMAP_SYS_TIMER(3_secure)
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+static void __init omap4_timer_init(void)
+{
+#ifdef CONFIG_LOCAL_TIMERS
+ twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_256);
+ BUG_ON(!twd_base);
+#endif
+ omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE);
+ omap2_gp_clocksource_init(2, OMAP4_MPU_SOURCE);
+}
+OMAP_SYS_TIMER(4)
+#endif
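For readability, the two helper macros above expand per SoC into a tiny init function plus an exported sys_timer. For example, OMAP_SYS_TIMER_INIT(3, 1, OMAP3_CLKEV_SOURCE, 2, OMAP3_MPU_SOURCE) followed by OMAP_SYS_TIMER(3) is roughly equivalent to:

static void __init omap3_timer_init(void)
{
	omap2_gp_clockevent_init(1, OMAP3_CLKEV_SOURCE);
	omap2_gp_clocksource_init(2, OMAP3_MPU_SOURCE);
}

struct sys_timer omap3_timer = {
	.init	= omap3_timer_init,
};

Board files then point their MACHINE_START .timer field at the per-SoC omap2_timer/omap3_timer/omap3_secure_timer/omap4_timer instead of the single omap_timer that the deleted timer-gp.c used to export.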
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
new file mode 100644
index 000000000000..3aaa46f6cd12
--- /dev/null
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -0,0 +1,304 @@
+/*
+ * twl-common.c
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c/twl.h>
+#include <linux/gpio.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+
+#include <plat/i2c.h>
+#include <plat/usb.h>
+
+#include "twl-common.h"
+
+static struct i2c_board_info __initdata pmic_i2c_board_info = {
+ .addr = 0x48,
+ .flags = I2C_CLIENT_WAKE,
+};
+
+void __init omap_pmic_init(int bus, u32 clkrate,
+ const char *pmic_type, int pmic_irq,
+ struct twl4030_platform_data *pmic_data)
+{
+ strncpy(pmic_i2c_board_info.type, pmic_type,
+ sizeof(pmic_i2c_board_info.type));
+ pmic_i2c_board_info.irq = pmic_irq;
+ pmic_i2c_board_info.platform_data = pmic_data;
+
+ omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
+}
+
+static struct twl4030_usb_data omap4_usb_pdata = {
+ .phy_init = omap4430_phy_init,
+ .phy_exit = omap4430_phy_exit,
+ .phy_power = omap4430_phy_power,
+ .phy_set_clock = omap4430_phy_set_clk,
+ .phy_suspend = omap4430_phy_suspend,
+};
+
+static struct twl4030_usb_data omap3_usb_pdata = {
+ .usb_mode = T2_USB_MODE_ULPI,
+};
+
+static int omap3_batt_table[] = {
+/* 0 C */
+30800, 29500, 28300, 27100,
+26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
+17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
+11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310,
+8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830,
+5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170,
+4040, 3910, 3790, 3670, 3550
+};
+
+static struct twl4030_bci_platform_data omap3_bci_pdata = {
+ .battery_tmp_tbl = omap3_batt_table,
+ .tblsize = ARRAY_SIZE(omap3_batt_table),
+};
+
+static struct twl4030_madc_platform_data omap3_madc_pdata = {
+ .irq_line = 1,
+};
+
+static struct twl4030_codec_audio_data omap3_audio;
+
+static struct twl4030_codec_data omap3_codec_pdata = {
+ .audio_mclk = 26000000,
+ .audio = &omap3_audio,
+};
+
+static struct regulator_consumer_supply omap3_vdda_dac_supplies[] = {
+ REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"),
+};
+
+static struct regulator_init_data omap3_vdac_idata = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(omap3_vdda_dac_supplies),
+ .consumer_supplies = omap3_vdda_dac_supplies,
+};
+
+static struct regulator_consumer_supply omap3_vpll2_supplies[] = {
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"),
+};
+
+static struct regulator_init_data omap3_vpll2_idata = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(omap3_vpll2_supplies),
+ .consumer_supplies = omap3_vpll2_supplies,
+};
+
+static struct regulator_init_data omap4_vdac_idata = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_vaux2_idata = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 2800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_vaux3_idata = {
+ .constraints = {
+ .min_uV = 1000000,
+ .max_uV = 3000000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_consumer_supply omap4_vmmc_supply[] = {
+ REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
+};
+
+/* VMMC1 for MMC1 card */
+static struct regulator_init_data omap4_vmmc_idata = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 3000000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(omap4_vmmc_supply),
+ .consumer_supplies = omap4_vmmc_supply,
+};
+
+static struct regulator_init_data omap4_vpp_idata = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 2500000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_vana_idata = {
+ .constraints = {
+ .min_uV = 2100000,
+ .max_uV = 2100000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_vcxio_idata = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_vusb_idata = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_clk32kg_idata = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
+ u32 pdata_flags, u32 regulators_flags)
+{
+ if (!pmic_data->irq_base)
+ pmic_data->irq_base = TWL6030_IRQ_BASE;
+ if (!pmic_data->irq_end)
+ pmic_data->irq_end = TWL6030_IRQ_END;
+
+ /* Common platform data configurations */
+ if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
+ pmic_data->usb = &omap4_usb_pdata;
+
+ /* Common regulator configurations */
+ if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
+ pmic_data->vdac = &omap4_vdac_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_VAUX2 && !pmic_data->vaux2)
+ pmic_data->vaux2 = &omap4_vaux2_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_VAUX3 && !pmic_data->vaux3)
+ pmic_data->vaux3 = &omap4_vaux3_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_VMMC && !pmic_data->vmmc)
+ pmic_data->vmmc = &omap4_vmmc_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_VPP && !pmic_data->vpp)
+ pmic_data->vpp = &omap4_vpp_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_VANA && !pmic_data->vana)
+ pmic_data->vana = &omap4_vana_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_VCXIO && !pmic_data->vcxio)
+ pmic_data->vcxio = &omap4_vcxio_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_VUSB && !pmic_data->vusb)
+ pmic_data->vusb = &omap4_vusb_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_CLK32KG &&
+ !pmic_data->clk32kg)
+ pmic_data->clk32kg = &omap4_clk32kg_idata;
+}
+
+void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
+ u32 pdata_flags, u32 regulators_flags)
+{
+ if (!pmic_data->irq_base)
+ pmic_data->irq_base = TWL4030_IRQ_BASE;
+ if (!pmic_data->irq_end)
+ pmic_data->irq_end = TWL4030_IRQ_END;
+
+ /* Common platform data configurations */
+ if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
+ pmic_data->usb = &omap3_usb_pdata;
+
+ if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci)
+ pmic_data->bci = &omap3_bci_pdata;
+
+ if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc)
+ pmic_data->madc = &omap3_madc_pdata;
+
+ if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->codec)
+ pmic_data->codec = &omap3_codec_pdata;
+
+ /* Common regulator configurations */
+ if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
+ pmic_data->vdac = &omap3_vdac_idata;
+
+ if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2)
+ pmic_data->vpll2 = &omap3_vpll2_idata;
+}
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
new file mode 100644
index 000000000000..5e83a5bd37fb
--- /dev/null
+++ b/arch/arm/mach-omap2/twl-common.h
@@ -0,0 +1,59 @@
+#ifndef __OMAP_PMIC_COMMON__
+#define __OMAP_PMIC_COMMON__
+
+#define TWL_COMMON_PDATA_USB (1 << 0)
+#define TWL_COMMON_PDATA_BCI (1 << 1)
+#define TWL_COMMON_PDATA_MADC (1 << 2)
+#define TWL_COMMON_PDATA_AUDIO (1 << 3)
+
+/* Common LDO regulators for TWL4030/TWL6030 */
+#define TWL_COMMON_REGULATOR_VDAC (1 << 0)
+#define TWL_COMMON_REGULATOR_VAUX1 (1 << 1)
+#define TWL_COMMON_REGULATOR_VAUX2 (1 << 2)
+#define TWL_COMMON_REGULATOR_VAUX3 (1 << 3)
+
+/* TWL6030 LDO regulators */
+#define TWL_COMMON_REGULATOR_VMMC (1 << 4)
+#define TWL_COMMON_REGULATOR_VPP (1 << 5)
+#define TWL_COMMON_REGULATOR_VUSIM (1 << 6)
+#define TWL_COMMON_REGULATOR_VANA (1 << 7)
+#define TWL_COMMON_REGULATOR_VCXIO (1 << 8)
+#define TWL_COMMON_REGULATOR_VUSB (1 << 9)
+#define TWL_COMMON_REGULATOR_CLK32KG (1 << 10)
+
+/* TWL4030 LDO regulators */
+#define TWL_COMMON_REGULATOR_VPLL1 (1 << 4)
+#define TWL_COMMON_REGULATOR_VPLL2 (1 << 5)
+
+
+struct twl4030_platform_data;
+
+void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
+ struct twl4030_platform_data *pmic_data);
+
+static inline void omap2_pmic_init(const char *pmic_type,
+ struct twl4030_platform_data *pmic_data)
+{
+ omap_pmic_init(2, 2600, pmic_type, INT_24XX_SYS_NIRQ, pmic_data);
+}
+
+static inline void omap3_pmic_init(const char *pmic_type,
+ struct twl4030_platform_data *pmic_data)
+{
+ omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data);
+}
+
+static inline void omap4_pmic_init(const char *pmic_type,
+ struct twl4030_platform_data *pmic_data)
+{
+ /* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */
+ omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data);
+}
+
+void omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
+ u32 pdata_flags, u32 regulators_flags);
+
+void omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
+ u32 pdata_flags, u32 regulators_flags);
+
+#endif /* __OMAP_PMIC_COMMON__ */
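Together with twl-common.c, the intended use is for a board file to fill in only its board-specific TWL data and let these helpers supply the shared pieces. A hedged sketch follows; the twldata variable, the "twl4030" chip name and the flag selection are illustrative, not taken from this patch.

#include "twl-common.h"

static struct twl4030_platform_data example_twldata;	/* illustrative */

static void __init example_board_pmic_init(void)
{
	/* add the common USB pdata and the VDAC/VPLL2 regulator defaults */
	omap3_pmic_get_config(&example_twldata,
			      TWL_COMMON_PDATA_USB,
			      TWL_COMMON_REGULATOR_VDAC |
			      TWL_COMMON_REGULATOR_VPLL2);

	/* register the PMIC on I2C bus 1 at 2600 kHz (see omap3_pmic_init) */
	omap3_pmic_init("twl4030", &example_twldata);
}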
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index f12c41b98d46..b6ddd7a5db6a 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -24,7 +24,7 @@ static unsigned int __init orion5x_variant(void)
orion5x_pcie_id(&dev, &rev);
- if (dev == MV88F5181_DEV_ID && rev >= MV88F5181L_REV_A0)
+ if (dev == MV88F5181_DEV_ID)
return MPP_F5181_MASK;
if (dev == MV88F5182_DEV_ID)
diff --git a/arch/arm/mach-pnx4008/include/mach/entry-macro.S b/arch/arm/mach-pnx4008/include/mach/entry-macro.S
index 8003037578ed..db7eeebf30d7 100644
--- a/arch/arm/mach-pnx4008/include/mach/entry-macro.S
+++ b/arch/arm/mach-pnx4008/include/mach/entry-macro.S
@@ -120,8 +120,3 @@
1003:
.endm
-
- .macro irq_prio_table
- .endm
-
-
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index a10996782476..bc55d07566ca 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -518,4 +518,7 @@ MACHINE_START(ARMCORE, "Compulab CM-X2XX")
.init_irq = cmx2xx_init_irq,
.timer = &pxa_timer,
.init_machine = cmx2xx_init,
+#ifdef CONFIG_PCI
+ .dma_zone_size = SZ_64M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index b2248e76ec8b..b199596f9c3d 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -12,6 +12,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kernel.h>
@@ -161,10 +162,10 @@ static mfp_cfg_t cm_x3xx_mfp_cfg[] __initdata = {
GPIO99_GPIO, /* Ethernet IRQ */
/* RTC GPIOs */
- GPIO95_GPIO, /* RTC CS */
- GPIO96_GPIO, /* RTC WR */
- GPIO97_GPIO, /* RTC RD */
- GPIO98_GPIO, /* RTC IO */
+ GPIO95_GPIO | MFP_LPM_DRIVE_HIGH, /* RTC CS */
+ GPIO96_GPIO | MFP_LPM_DRIVE_HIGH, /* RTC WR */
+ GPIO97_GPIO | MFP_LPM_DRIVE_HIGH, /* RTC RD */
+ GPIO98_GPIO, /* RTC IO */
/* Standard I2C */
GPIO21_I2C_SCL,
@@ -484,14 +485,13 @@ static int cm_x300_ulpi_phy_reset(void)
int err;
/* reset the PHY */
- err = gpio_request(GPIO_ULPI_PHY_RST, "ulpi reset");
+ err = gpio_request_one(GPIO_ULPI_PHY_RST, GPIOF_OUT_INIT_LOW,
+ "ulpi reset");
if (err) {
- pr_err("%s: failed to request ULPI reset GPIO: %d\n",
- __func__, err);
+ pr_err("failed to request ULPI reset GPIO: %d\n", err);
return err;
}
- gpio_direction_output(GPIO_ULPI_PHY_RST, 0);
msleep(10);
gpio_set_value(GPIO_ULPI_PHY_RST, 1);
msleep(10);
@@ -510,8 +510,7 @@ static inline int cm_x300_u2d_init(struct device *dev)
pout_clk = clk_get(NULL, "CLK_POUT");
if (IS_ERR(pout_clk)) {
err = PTR_ERR(pout_clk);
- pr_err("%s: failed to get CLK_POUT: %d\n",
- __func__, err);
+ pr_err("failed to get CLK_POUT: %d\n", err);
return err;
}
clk_enable(pout_clk);
@@ -768,39 +767,36 @@ static void __init cm_x300_init_da9030(void)
irq_set_irq_wake(IRQ_WAKEUP0, 1);
}
+/* wi2wi gpio setting for system_rev >= 130 */
+static struct gpio cm_x300_wi2wi_gpios[] __initdata = {
+ { 71, GPIOF_OUT_INIT_HIGH, "wlan en" },
+ { 70, GPIOF_OUT_INIT_HIGH, "bt reset" },
+};
+
static void __init cm_x300_init_wi2wi(void)
{
int bt_reset, wlan_en;
int err;
if (system_rev < 130) {
- wlan_en = 77;
- bt_reset = 78;
- } else {
- wlan_en = 71;
- bt_reset = 70;
+ cm_x300_wi2wi_gpios[0].gpio = 77; /* wlan en */
+ cm_x300_wi2wi_gpios[1].gpio = 78; /* bt reset */
}
/* Libertas and CSR reset */
- err = gpio_request(wlan_en, "wlan en");
+ err = gpio_request_array(ARRAY_AND_SIZE(cm_x300_wi2wi_gpios));
if (err) {
- pr_err("CM-X300: failed to request wlan en gpio: %d\n", err);
- } else {
- gpio_direction_output(wlan_en, 1);
- gpio_free(wlan_en);
+ pr_err("failed to request wifi/bt gpios: %d\n", err);
+ return;
}
- err = gpio_request(bt_reset, "bt reset");
- if (err) {
- pr_err("CM-X300: failed to request bt reset gpio: %d\n", err);
- } else {
- gpio_direction_output(bt_reset, 1);
- udelay(10);
- gpio_set_value(bt_reset, 0);
- udelay(10);
- gpio_set_value(bt_reset, 1);
- gpio_free(bt_reset);
- }
+ udelay(10);
+ gpio_set_value(bt_reset, 0);
+ udelay(10);
+ gpio_set_value(bt_reset, 1);
+
+ gpio_free(wlan_en);
+ gpio_free(bt_reset);
}
/* MFP */
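The GPIO cleanups in this and the following board files all follow the same shape: a struct gpio table plus gpio_request_array()/gpio_free_array() replace the per-pin gpio_request() and gpio_direction_*() calls. A standalone sketch of the pattern, with made-up GPIO numbers and labels:

#include <linux/kernel.h>
#include <linux/gpio.h>

/* illustrative table: numbers and labels are invented */
static struct gpio example_gpios[] = {
	{ 10, GPIOF_IN,            "example irq"   },
	{ 11, GPIOF_OUT_INIT_HIGH, "example reset" },
};

static int example_claim_gpios(void)
{
	int err;

	/* requests and configures every entry, rolling back on failure */
	err = gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
	if (err)
		return err;

	/* ... use the pins ... */

	gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
	return 0;
}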
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index f941a495a4a8..99960a1814e0 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -135,42 +135,6 @@ static unsigned long hx4700_pin_config[] __initdata = {
GPIO66_GPIO, /* nSDIO_IRQ */
};
-#define HX4700_GPIO_IN(num, _desc) \
- { .gpio = (num), .dir = 0, .desc = (_desc) }
-#define HX4700_GPIO_OUT(num, _init, _desc) \
- { .gpio = (num), .dir = 1, .init = (_init), .desc = (_desc) }
-struct gpio_ress {
- unsigned gpio : 8;
- unsigned dir : 1;
- unsigned init : 1;
- char *desc;
-};
-
-static int hx4700_gpio_request(struct gpio_ress *gpios, int size)
-{
- int i, rc = 0;
- int gpio;
- int dir;
-
- for (i = 0; (!rc) && (i < size); i++) {
- gpio = gpios[i].gpio;
- dir = gpios[i].dir;
- rc = gpio_request(gpio, gpios[i].desc);
- if (rc) {
- pr_err("Error requesting GPIO %d(%s) : %d\n",
- gpio, gpios[i].desc, rc);
- continue;
- }
- if (dir)
- gpio_direction_output(gpio, gpios[i].init);
- else
- gpio_direction_input(gpio);
- }
- while ((rc) && (--i >= 0))
- gpio_free(gpios[i].gpio);
- return rc;
-}
-
/*
* IRDA
*/
@@ -829,26 +793,30 @@ static struct platform_device *devices[] __initdata = {
&pcmcia,
};
-static struct gpio_ress global_gpios[] = {
- HX4700_GPIO_IN(GPIO12_HX4700_ASIC3_IRQ, "ASIC3_IRQ"),
- HX4700_GPIO_IN(GPIO13_HX4700_W3220_IRQ, "W3220_IRQ"),
- HX4700_GPIO_IN(GPIO14_HX4700_nWLAN_IRQ, "WLAN_IRQ"),
- HX4700_GPIO_OUT(GPIO59_HX4700_LCD_PC1, 1, "LCD_PC1"),
- HX4700_GPIO_OUT(GPIO62_HX4700_LCD_nRESET, 1, "LCD_RESET"),
- HX4700_GPIO_OUT(GPIO70_HX4700_LCD_SLIN1, 1, "LCD_SLIN1"),
- HX4700_GPIO_OUT(GPIO84_HX4700_LCD_SQN, 1, "LCD_SQN"),
- HX4700_GPIO_OUT(GPIO110_HX4700_LCD_LVDD_3V3_ON, 1, "LCD_LVDD"),
- HX4700_GPIO_OUT(GPIO111_HX4700_LCD_AVDD_3V3_ON, 1, "LCD_AVDD"),
- HX4700_GPIO_OUT(GPIO32_HX4700_RS232_ON, 1, "RS232_ON"),
- HX4700_GPIO_OUT(GPIO71_HX4700_ASIC3_nRESET, 1, "ASIC3_nRESET"),
- HX4700_GPIO_OUT(GPIO82_HX4700_EUART_RESET, 1, "EUART_RESET"),
- HX4700_GPIO_OUT(GPIO105_HX4700_nIR_ON, 1, "nIR_EN"),
+static struct gpio global_gpios[] = {
+ { GPIO12_HX4700_ASIC3_IRQ, GPIOF_IN, "ASIC3_IRQ" },
+ { GPIO13_HX4700_W3220_IRQ, GPIOF_IN, "W3220_IRQ" },
+ { GPIO14_HX4700_nWLAN_IRQ, GPIOF_IN, "WLAN_IRQ" },
+ { GPIO59_HX4700_LCD_PC1, GPIOF_OUT_INIT_HIGH, "LCD_PC1" },
+ { GPIO62_HX4700_LCD_nRESET, GPIOF_OUT_INIT_HIGH, "LCD_RESET" },
+ { GPIO70_HX4700_LCD_SLIN1, GPIOF_OUT_INIT_HIGH, "LCD_SLIN1" },
+ { GPIO84_HX4700_LCD_SQN, GPIOF_OUT_INIT_HIGH, "LCD_SQN" },
+ { GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
+ { GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
+ { GPIO32_HX4700_RS232_ON, GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+ { GPIO71_HX4700_ASIC3_nRESET, GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+ { GPIO82_HX4700_EUART_RESET, GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+ { GPIO105_HX4700_nIR_ON, GPIOF_OUT_INIT_HIGH, "nIR_EN" },
};
static void __init hx4700_init(void)
{
+ int ret;
+
pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
- hx4700_gpio_request(ARRAY_AND_SIZE(global_gpios));
+ ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
+ if (ret)
+ pr_err ("hx4700: Failed to request GPIOs.\n");
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
diff --git a/arch/arm/mach-pxa/include/mach/magician.h b/arch/arm/mach-pxa/include/mach/magician.h
index 0a2efcf7947c..7cbfc5d3f9df 100644
--- a/arch/arm/mach-pxa/include/mach/magician.h
+++ b/arch/arm/mach-pxa/include/mach/magician.h
@@ -12,6 +12,7 @@
#ifndef _MAGICIAN_H_
#define _MAGICIAN_H_
+#include <linux/gpio.h>
#include <mach/irqs.h>
/*
@@ -77,7 +78,7 @@
* CPLD EGPIOs
*/
-#define MAGICIAN_EGPIO_BASE 0x80 /* GPIO_BOARD_START */
+#define MAGICIAN_EGPIO_BASE NR_BUILTIN_GPIO
#define MAGICIAN_EGPIO(reg,bit) \
(MAGICIAN_EGPIO_BASE + 8*reg + bit)
diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h
index 07734f37f8fd..d05a59727d66 100644
--- a/arch/arm/mach-pxa/include/mach/memory.h
+++ b/arch/arm/mach-pxa/include/mach/memory.h
@@ -17,8 +17,4 @@
*/
#define PLAT_PHYS_OFFSET UL(0xa0000000)
-#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
-#define ARM_DMA_ZONE_SIZE SZ_64M
-#endif
-
#endif
diff --git a/arch/arm/mach-pxa/include/mach/pm.h b/arch/arm/mach-pxa/include/mach/pm.h
index f15afe012995..51558bcee999 100644
--- a/arch/arm/mach-pxa/include/mach/pm.h
+++ b/arch/arm/mach-pxa/include/mach/pm.h
@@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns {
extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;
/* sleep.S */
-extern void pxa25x_cpu_suspend(unsigned int, long);
-extern void pxa27x_cpu_suspend(unsigned int, long);
+extern int pxa25x_finish_suspend(unsigned long);
+extern int pxa27x_finish_suspend(unsigned long);
extern int pxa_pm_enter(suspend_state_t state);
extern int pxa_pm_prepare(void);
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index e1920572948a..0e42798942f7 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -344,22 +344,14 @@ static struct pxafb_mach_info samsung_info = {
* Backlight
*/
+static struct gpio magician_bl_gpios[] = {
+ { EGPIO_MAGICIAN_BL_POWER, GPIOF_DIR_OUT, "Backlight power" },
+ { EGPIO_MAGICIAN_BL_POWER2, GPIOF_DIR_OUT, "Backlight power 2" },
+};
+
static int magician_backlight_init(struct device *dev)
{
- int ret;
-
- ret = gpio_request(EGPIO_MAGICIAN_BL_POWER, "BL_POWER");
- if (ret)
- goto err;
- ret = gpio_request(EGPIO_MAGICIAN_BL_POWER2, "BL_POWER2");
- if (ret)
- goto err2;
- return 0;
-
-err2:
- gpio_free(EGPIO_MAGICIAN_BL_POWER);
-err:
- return ret;
+ return gpio_request_array(ARRAY_AND_SIZE(magician_bl_gpios));
}
static int magician_backlight_notify(struct device *dev, int brightness)
@@ -376,8 +368,7 @@ static int magician_backlight_notify(struct device *dev, int brightness)
static void magician_backlight_exit(struct device *dev)
{
- gpio_free(EGPIO_MAGICIAN_BL_POWER);
- gpio_free(EGPIO_MAGICIAN_BL_POWER2);
+ gpio_free_array(ARRAY_AND_SIZE(magician_bl_gpios));
}
static struct platform_pwm_backlight_data backlight_data = {
@@ -712,16 +703,25 @@ static struct platform_device *devices[] __initdata = {
&leds_gpio,
};
+static struct gpio magician_global_gpios[] = {
+ { GPIO13_MAGICIAN_CPLD_IRQ, GPIOF_IN, "CPLD_IRQ" },
+ { GPIO107_MAGICIAN_DS1WM_IRQ, GPIOF_IN, "DS1WM_IRQ" },
+ { GPIO104_MAGICIAN_LCD_POWER_1, GPIOF_OUT_INIT_LOW, "LCD power 1" },
+ { GPIO105_MAGICIAN_LCD_POWER_2, GPIOF_OUT_INIT_LOW, "LCD power 2" },
+ { GPIO106_MAGICIAN_LCD_POWER_3, GPIOF_OUT_INIT_LOW, "LCD power 3" },
+ { GPIO83_MAGICIAN_nIR_EN, GPIOF_OUT_INIT_HIGH, "nIR_EN" },
+};
+
static void __init magician_init(void)
{
void __iomem *cpld;
int lcd_select;
int err;
- gpio_request(GPIO13_MAGICIAN_CPLD_IRQ, "CPLD_IRQ");
- gpio_request(GPIO107_MAGICIAN_DS1WM_IRQ, "DS1WM_IRQ");
-
pxa2xx_mfp_config(ARRAY_AND_SIZE(magician_pin_config));
+ err = gpio_request_array(ARRAY_AND_SIZE(magician_global_gpios));
+ if (err)
+ pr_err("magician: Failed to request GPIOs: %d\n", err);
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
@@ -729,11 +729,7 @@ static void __init magician_init(void)
platform_add_devices(ARRAY_AND_SIZE(devices));
- err = gpio_request(GPIO83_MAGICIAN_nIR_EN, "nIR_EN");
- if (!err) {
- gpio_direction_output(GPIO83_MAGICIAN_nIR_EN, 1);
- pxa_set_ficp_info(&magician_ficp_info);
- }
+ pxa_set_ficp_info(&magician_ficp_info);
pxa27x_set_i2c_power_info(NULL);
pxa_set_i2c_info(&i2c_info);
pxa_set_mci_info(&magician_mci_info);
@@ -747,16 +743,9 @@ static void __init magician_init(void)
system_rev = board_id & 0x7;
lcd_select = board_id & 0x8;
pr_info("LCD type: %s\n", lcd_select ? "Samsung" : "Toppoly");
- if (lcd_select && (system_rev < 3)) {
- gpio_request(GPIO75_MAGICIAN_SAMSUNG_POWER, "SAMSUNG_POWER");
- gpio_direction_output(GPIO75_MAGICIAN_SAMSUNG_POWER, 0);
- }
- gpio_request(GPIO104_MAGICIAN_LCD_POWER_1, "LCD_POWER_1");
- gpio_request(GPIO105_MAGICIAN_LCD_POWER_2, "LCD_POWER_2");
- gpio_request(GPIO106_MAGICIAN_LCD_POWER_3, "LCD_POWER_3");
- gpio_direction_output(GPIO104_MAGICIAN_LCD_POWER_1, 0);
- gpio_direction_output(GPIO105_MAGICIAN_LCD_POWER_2, 0);
- gpio_direction_output(GPIO106_MAGICIAN_LCD_POWER_3, 0);
+ if (lcd_select && (system_rev < 3))
+ gpio_request_one(GPIO75_MAGICIAN_SAMSUNG_POWER,
+ GPIOF_OUT_INIT_LOW, "SAMSUNG_POWER");
pxa_set_fb_info(NULL, lcd_select ? &samsung_info : &toppoly_info);
} else
pr_err("LCD detection: CPLD mapping failed\n");
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index e3470137c934..aa67637ae41d 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -177,50 +177,6 @@ static unsigned long mioa701_pin_config[] = {
MFP_CFG_OUT(GPIO116, AF0, DRIVE_HIGH),
};
-#define MIO_GPIO_IN(num, _desc) \
- { .gpio = (num), .dir = 0, .desc = (_desc) }
-#define MIO_GPIO_OUT(num, _init, _desc) \
- { .gpio = (num), .dir = 1, .init = (_init), .desc = (_desc) }
-struct gpio_ress {
- unsigned gpio : 8;
- unsigned dir : 1;
- unsigned init : 1;
- char *desc;
-};
-
-static int mio_gpio_request(struct gpio_ress *gpios, int size)
-{
- int i, rc = 0;
- int gpio;
- int dir;
-
- for (i = 0; (!rc) && (i < size); i++) {
- gpio = gpios[i].gpio;
- dir = gpios[i].dir;
- rc = gpio_request(gpio, gpios[i].desc);
- if (rc) {
- printk(KERN_ERR "Error requesting GPIO %d(%s) : %d\n",
- gpio, gpios[i].desc, rc);
- continue;
- }
- if (dir)
- gpio_direction_output(gpio, gpios[i].init);
- else
- gpio_direction_input(gpio);
- }
- while ((rc) && (--i >= 0))
- gpio_free(gpios[i].gpio);
- return rc;
-}
-
-static void mio_gpio_free(struct gpio_ress *gpios, int size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- gpio_free(gpios[i].gpio);
-}
-
/* LCD Screen and Backlight */
static struct platform_pwm_backlight_data mioa701_backlight_data = {
.pwm_id = 0,
@@ -346,16 +302,16 @@ irqreturn_t gsm_on_irq(int irq, void *p)
return IRQ_HANDLED;
}
-struct gpio_ress gsm_gpios[] = {
- MIO_GPIO_IN(GPIO25_GSM_MOD_ON_STATE, "GSM state"),
- MIO_GPIO_IN(GPIO113_GSM_EVENT, "GSM event"),
+static struct gpio gsm_gpios[] = {
+ { GPIO25_GSM_MOD_ON_STATE, GPIOF_IN, "GSM state" },
+ { GPIO113_GSM_EVENT, GPIOF_IN, "GSM event" },
};
static int __init gsm_init(void)
{
int rc;
- rc = mio_gpio_request(ARRAY_AND_SIZE(gsm_gpios));
+ rc = gpio_request_array(ARRAY_AND_SIZE(gsm_gpios));
if (rc)
goto err_gpio;
rc = request_irq(gpio_to_irq(GPIO25_GSM_MOD_ON_STATE), gsm_on_irq,
@@ -369,7 +325,7 @@ static int __init gsm_init(void)
err_irq:
printk(KERN_ERR "Mioa701: Can't request GSM_ON irq\n");
- mio_gpio_free(ARRAY_AND_SIZE(gsm_gpios));
+ gpio_free_array(ARRAY_AND_SIZE(gsm_gpios));
err_gpio:
printk(KERN_ERR "Mioa701: gsm not available\n");
return rc;
@@ -378,7 +334,7 @@ err_gpio:
static void gsm_exit(void)
{
free_irq(gpio_to_irq(GPIO25_GSM_MOD_ON_STATE), NULL);
- mio_gpio_free(ARRAY_AND_SIZE(gsm_gpios));
+ gpio_free_array(ARRAY_AND_SIZE(gsm_gpios));
}
/*
@@ -749,14 +705,16 @@ static void mioa701_restart(char c, const char *cmd)
arm_machine_restart('s', cmd);
}
-static struct gpio_ress global_gpios[] = {
- MIO_GPIO_OUT(GPIO9_CHARGE_EN, 1, "Charger enable"),
- MIO_GPIO_OUT(GPIO18_POWEROFF, 0, "Power Off"),
- MIO_GPIO_OUT(GPIO87_LCD_POWER, 0, "LCD Power"),
+static struct gpio global_gpios[] = {
+ { GPIO9_CHARGE_EN, GPIOF_OUT_INIT_HIGH, "Charger enable" },
+ { GPIO18_POWEROFF, GPIOF_OUT_INIT_LOW, "Power Off" },
+ { GPIO87_LCD_POWER, GPIOF_OUT_INIT_LOW, "LCD Power" },
};
static void __init mioa701_machine_init(void)
{
+ int rc;
+
PSLR = 0xff100000; /* SYSDEL=125ms, PWRDEL=125ms, PSLR_SL_ROD=1 */
PCFR = PCFR_DC_EN | PCFR_GPR_EN | PCFR_OPDE;
RTTR = 32768 - 1; /* Reset crazy WinCE value */
@@ -766,7 +724,9 @@ static void __init mioa701_machine_init(void)
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
- mio_gpio_request(ARRAY_AND_SIZE(global_gpios));
+ rc = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
+ if (rc)
+ pr_err("MioA701: Failed to request GPIOs: %d", rc);
bootstrap_init();
pxa_set_fb_info(NULL, &mioa701_pxafb_info);
pxa_set_mci_info(&mioa701_mci_info);
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 65f24f0b77e8..5a5329bc33f1 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -33,6 +33,7 @@
#include <linux/i2c-gpio.h>
#include <asm/mach-types.h>
+#include <asm/suspend.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 51e1583265b2..37178a8559b1 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -42,7 +42,6 @@ int pxa_pm_enter(suspend_state_t state)
/* *** go zzz *** */
pxa_cpu_pm_fns->enter(state);
- cpu_init();
if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
/* after sleeping, validate the checksum */
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index fed363cec9c6..9c434d21a271 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -25,6 +25,7 @@
#include <linux/irq.h>
#include <asm/mach/map.h>
+#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/gpio.h>
@@ -244,7 +245,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
switch (state) {
case PM_SUSPEND_MEM:
- pxa25x_cpu_suspend(PWRMODE_SLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend);
break;
}
}
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 2fecbec58d88..9d2400b5f503 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -24,6 +24,7 @@
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <asm/irq.h>
+#include <asm/suspend.h>
#include <mach/irqs.h>
#include <mach/gpio.h>
#include <mach/pxa27x.h>
@@ -284,6 +285,11 @@ void pxa27x_cpu_pm_restore(unsigned long *sleep_save)
void pxa27x_cpu_pm_enter(suspend_state_t state)
{
extern void pxa_cpu_standby(void);
+#ifndef CONFIG_IWMMXT
+ u64 acc0;
+
+ asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
+#endif
/* ensure voltage-change sequencer not initiated, which hangs */
PCFR &= ~PCFR_FVC;
@@ -299,7 +305,10 @@ void pxa27x_cpu_pm_enter(suspend_state_t state)
pxa_cpu_standby();
break;
case PM_SUSPEND_MEM:
- pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ cpu_suspend(pwrmode, pxa27x_finish_suspend);
+#ifndef CONFIG_IWMMXT
+ asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0));
+#endif
break;
}
}
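
The pxa25x/pxa27x hunks above switch from the old pxa*_cpu_suspend(mode, v:p offset) assembly entry points to the generic cpu_suspend(arg, fn) helper: the core code saves the CPU context, then calls a small "finisher" that takes an unsigned long argument and only returns if entering the low-power state failed. A rough sketch of the shape of such a finisher, with soc_enter_sleep() standing in for the SoC-specific register write (a hypothetical name, not from this patch):

        #include <asm/suspend.h>

        /* hypothetical SoC hook that programs the power mode and stops the core */
        extern void soc_enter_sleep(unsigned long pwrmode);

        static int example_finish_suspend(unsigned long pwrmode)
        {
                /* CPU context has already been saved by cpu_suspend() */
                soc_enter_sleep(pwrmode);       /* normally does not return */
                return 1;                       /* reached only if sleep entry failed */
        }

        static void example_pm_enter(void)
        {
                /* on wakeup, execution resumes here via cpu_resume */
                cpu_suspend(3, example_finish_suspend); /* 3: e.g. a PWRMODE field value */
        }
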
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 8521d7d6f1da..ef1c56a67afc 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -24,6 +24,7 @@
#include <linux/i2c/pxa-i2c.h>
#include <asm/mach/map.h>
+#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/gpio.h>
#include <mach/pxa3xx-regs.h>
@@ -141,8 +142,13 @@ static void pxa3xx_cpu_pm_suspend(void)
{
volatile unsigned long *p = (volatile void *)0xc0000000;
unsigned long saved_data = *p;
+#ifndef CONFIG_IWMMXT
+ u64 acc0;
- extern void pxa3xx_cpu_suspend(long);
+ asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
+#endif
+
+ extern int pxa3xx_finish_suspend(unsigned long);
/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
@@ -162,11 +168,15 @@ static void pxa3xx_cpu_pm_suspend(void)
/* overwrite with the resume address */
*p = virt_to_phys(cpu_resume);
- pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ cpu_suspend(0, pxa3xx_finish_suspend);
*p = saved_data;
AD3ER = 0;
+
+#ifndef CONFIG_IWMMXT
+ asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0));
+#endif
}
static void pxa3xx_cpu_pm_enter(suspend_state_t state)
diff --git a/arch/arm/mach-pxa/saarb.c b/arch/arm/mach-pxa/saarb.c
index 9322fe527c7f..e53a3334c944 100644
--- a/arch/arm/mach-pxa/saarb.c
+++ b/arch/arm/mach-pxa/saarb.c
@@ -104,7 +104,7 @@ static void __init saarb_init(void)
MACHINE_START(SAARB, "PXA955 Handheld Platform (aka SAARB)")
.boot_params = 0xa0000100,
- .map_io = pxa_map_io,
+ .map_io = pxa3xx_map_io,
.nr_irqs = SAARB_NR_IRQS,
.init_irq = pxa95x_init_irq,
.timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index 6f5368899d84..1e544be9905d 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -24,20 +24,9 @@
#ifdef CONFIG_PXA3xx
/*
- * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4)
- *
- * r0 = v:p offset
+ * pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4)
*/
-ENTRY(pxa3xx_cpu_suspend)
-
-#ifndef CONFIG_IWMMXT
- mra r2, r3, acc0
-#endif
- stmfd sp!, {r2 - r12, lr} @ save registers on stack
- mov r1, r0
- ldr r3, =pxa_cpu_resume @ resume function
- bl cpu_suspend
-
+ENTRY(pxa3xx_finish_suspend)
mov r0, #0x06 @ S2D3C4 mode
mcr p14, 0, r0, c7, c0, 0 @ enter sleep
@@ -46,28 +35,18 @@ ENTRY(pxa3xx_cpu_suspend)
#ifdef CONFIG_PXA27x
/*
- * pxa27x_cpu_suspend()
+ * pxa27x_finish_suspend()
*
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
- * r1 = v:p offset
*/
-ENTRY(pxa27x_cpu_suspend)
-
-#ifndef CONFIG_IWMMXT
- mra r2, r3, acc0
-#endif
- stmfd sp!, {r2 - r12, lr} @ save registers on stack
- mov r4, r0 @ save sleep mode
- ldr r3, =pxa_cpu_resume @ resume function
- bl cpu_suspend
-
+ENTRY(pxa27x_finish_suspend)
@ Put the processor to sleep
@ (also workaround for sighting 28071)
@ prepare value for sleep mode
- mov r1, r4 @ sleep mode
+ mov r1, r0 @ sleep mode
@ prepare pointer to physical address 0 (virtual mapping in generic.c)
mov r2, #UNCACHED_PHYS_0
@@ -99,21 +78,16 @@ ENTRY(pxa27x_cpu_suspend)
#ifdef CONFIG_PXA25x
/*
- * pxa25x_cpu_suspend()
+ * pxa25x_finish_suspend()
*
* Forces CPU into sleep state.
*
* r0 = value for PWRMODE M field for desired sleep state
- * r1 = v:p offset
*/
-ENTRY(pxa25x_cpu_suspend)
- stmfd sp!, {r2 - r12, lr} @ save registers on stack
- mov r4, r0 @ save sleep mode
- ldr r3, =pxa_cpu_resume @ resume function
- bl cpu_suspend
+ENTRY(pxa25x_finish_suspend)
@ prepare value for sleep mode
- mov r1, r4 @ sleep mode
+ mov r1, r0 @ sleep mode
@ prepare pointer to physical address 0 (virtual mapping in generic.c)
mov r2, #UNCACHED_PHYS_0
@@ -195,16 +169,3 @@ pxa_cpu_do_suspend:
mcr p14, 0, r1, c7, c0, 0 @ PWRMODE
20: b 20b @ loop waiting for sleep
-
-/*
- * pxa_cpu_resume()
- *
- * entry point from bootloader into kernel during resume
- */
- .align 5
-pxa_cpu_resume:
- ldmfd sp!, {r2, r3}
-#ifndef CONFIG_IWMMXT
- mar acc0, r2, r3
-#endif
- ldmfd sp!, {r4 - r12, pc} @ return to caller
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 00363c7ac182..9b99cc164de5 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -31,6 +31,7 @@
#include <linux/can/platform/mcp251x.h>
#include <asm/mach-types.h>
+#include <asm/suspend.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -676,7 +677,7 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = {
static void zeus_power_off(void)
{
local_irq_disable();
- pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ cpu_suspend(PWRMODE_DEEPSLEEP, pxa27x_finish_suspend);
}
#else
#define zeus_power_off NULL
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index b9a9805e4828..dba6d0c1fc17 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -50,6 +50,7 @@ config MACH_REALVIEW_PB1176
bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
select CPU_V6
select ARM_GIC
+ select HAVE_TCM
help
Include support for the ARM(R) RealView(R) Platform Baseboard for
ARM1176JZF-S.
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
index 1759fa673eea..2022e092f0ca 100644
--- a/arch/arm/mach-realview/include/mach/memory.h
+++ b/arch/arm/mach-realview/include/mach/memory.h
@@ -29,10 +29,6 @@
#define PLAT_PHYS_OFFSET UL(0x00000000)
#endif
-#ifdef CONFIG_ZONE_DMA
-#define ARM_DMA_ZONE_SIZE SZ_256M
-#endif
-
#ifdef CONFIG_SPARSEMEM
/*
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 963bf0d8119a..4ae943bafa92 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -68,14 +68,6 @@ void __init smp_init_cpus(void)
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
scu_enable(scu_base_addr());
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 10e75faba4c9..7a4e3b18cb3e 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -470,4 +470,7 @@ MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
.init_irq = gic_init_irq,
.timer = &realview_eb_timer,
.init_machine = realview_eb_init,
+#ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index eab6070f66d0..ad5671acb66a 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -365,4 +365,7 @@ MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
.init_irq = gic_init_irq,
.timer = &realview_pb1176_timer,
.init_machine = realview_pb1176_init,
+#ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index b2985fc7cd4e..b43644b3685e 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -367,4 +367,7 @@ MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
.init_irq = gic_init_irq,
.timer = &realview_pb11mp_timer,
.init_machine = realview_pb11mp_init,
+#ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index fb6866558760..763e8f38c15d 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -317,4 +317,7 @@ MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
.init_irq = gic_init_irq,
.timer = &realview_pba8_timer,
.init_machine = realview_pba8_init,
+#ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 92ace2cf2b2c..363b0ab56150 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -400,4 +400,7 @@ MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
.init_irq = gic_init_irq,
.timer = &realview_pbx_timer,
.init_machine = realview_pbx_init,
+#ifdef CONFIG_ZONE_DMA
+ .dma_zone_size = SZ_256M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-s3c2400/Kconfig b/arch/arm/mach-s3c2400/Kconfig
deleted file mode 100644
index fdd8f5e96faf..000000000000
--- a/arch/arm/mach-s3c2400/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright 2007 Simtec Electronics
-#
-# Licensed under GPLv2
-
-menu "S3C2400 Machines"
-
-endmenu
diff --git a/arch/arm/mach-s3c2400/Makefile b/arch/arm/mach-s3c2400/Makefile
deleted file mode 100644
index 7e23f4e13766..000000000000
--- a/arch/arm/mach-s3c2400/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-# arch/arm/mach-s3c2400/Makefile
-#
-# Copyright 2007 Simtec Electronics
-#
-# Licensed under GPLv2
-
-obj-y :=
-obj-m :=
-obj-n :=
-obj- :=
-
-obj-$(CONFIG_CPU_S3C2400) += gpio.o
-
-# Machine support
-
diff --git a/arch/arm/mach-s3c2400/gpio.c b/arch/arm/mach-s3c2400/gpio.c
deleted file mode 100644
index 6c68e78f3595..000000000000
--- a/arch/arm/mach-s3c2400/gpio.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/* linux/arch/arm/mach-s3c2400/gpio.c
- *
- * Copyright (c) 2006 Lucas Correia Villa Real <lucasvr@gobolinux.org>
- *
- * S3C2400 GPIO support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-
-#include <mach/regs-gpio.h>
-
-int s3c2400_gpio_getirq(unsigned int pin)
-{
- if (pin < S3C2410_GPE(0) || pin > S3C2400_GPE(7))
- return -EINVAL; /* not valid interrupts */
-
- return (pin - S3C2410_GPE(0)) + IRQ_EINT0;
-}
-
-EXPORT_SYMBOL(s3c2400_gpio_getirq);
diff --git a/arch/arm/mach-s3c2400/include/mach/map.h b/arch/arm/mach-s3c2400/include/mach/map.h
deleted file mode 100644
index 3fd889200e99..000000000000
--- a/arch/arm/mach-s3c2400/include/mach/map.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* arch/arm/mach-s3c2400/include/mach/map.h
- *
- * Copyright 2003-2007 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Copyright 2003, Lucas Correia Villa Real
- *
- * S3C2400 - Memory map definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#define S3C2400_PA_MEMCTRL (0x14000000)
-#define S3C2400_PA_USBHOST (0x14200000)
-#define S3C2400_PA_IRQ (0x14400000)
-#define S3C2400_PA_DMA (0x14600000)
-#define S3C2400_PA_CLKPWR (0x14800000)
-#define S3C2400_PA_LCD (0x14A00000)
-#define S3C2400_PA_UART (0x15000000)
-#define S3C2400_PA_TIMER (0x15100000)
-#define S3C2400_PA_USBDEV (0x15200140)
-#define S3C2400_PA_WATCHDOG (0x15300000)
-#define S3C2400_PA_IIC (0x15400000)
-#define S3C2400_PA_IIS (0x15508000)
-#define S3C2400_PA_GPIO (0x15600000)
-#define S3C2400_PA_RTC (0x15700040)
-#define S3C2400_PA_ADC (0x15800000)
-#define S3C2400_PA_SPI (0x15900000)
-
-#define S3C2400_PA_MMC (0x15A00000)
-#define S3C2400_SZ_MMC SZ_1M
-
-/* physical addresses of all the chip-select areas */
-
-#define S3C2400_CS0 (0x00000000)
-#define S3C2400_CS1 (0x02000000)
-#define S3C2400_CS2 (0x04000000)
-#define S3C2400_CS3 (0x06000000)
-#define S3C2400_CS4 (0x08000000)
-#define S3C2400_CS5 (0x0A000000)
-#define S3C2400_CS6 (0x0C000000)
-#define S3C2400_CS7 (0x0E000000)
-
-#define S3C2400_SDRAM_PA (S3C2400_CS6)
-
-/* Use a single interface for common resources between S3C24XX cpus */
-
-#define S3C24XX_PA_IRQ S3C2400_PA_IRQ
-#define S3C24XX_PA_MEMCTRL S3C2400_PA_MEMCTRL
-#define S3C24XX_PA_USBHOST S3C2400_PA_USBHOST
-#define S3C24XX_PA_DMA S3C2400_PA_DMA
-#define S3C24XX_PA_CLKPWR S3C2400_PA_CLKPWR
-#define S3C24XX_PA_LCD S3C2400_PA_LCD
-#define S3C24XX_PA_UART S3C2400_PA_UART
-#define S3C24XX_PA_TIMER S3C2400_PA_TIMER
-#define S3C24XX_PA_USBDEV S3C2400_PA_USBDEV
-#define S3C24XX_PA_WATCHDOG S3C2400_PA_WATCHDOG
-#define S3C24XX_PA_IIC S3C2400_PA_IIC
-#define S3C24XX_PA_IIS S3C2400_PA_IIS
-#define S3C24XX_PA_GPIO S3C2400_PA_GPIO
-#define S3C24XX_PA_RTC S3C2400_PA_RTC
-#define S3C24XX_PA_ADC S3C2400_PA_ADC
-#define S3C24XX_PA_SPI S3C2400_PA_SPI
diff --git a/arch/arm/mach-s3c2410/include/mach/gpio-fns.h b/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
index f453c4f2cb8e..bab139201761 100644
--- a/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
+++ b/arch/arm/mach-s3c2410/include/mach/gpio-fns.h
@@ -52,12 +52,6 @@ extern unsigned int s3c2410_gpio_getcfg(unsigned int pin);
extern int s3c2410_gpio_getirq(unsigned int pin);
-#ifdef CONFIG_CPU_S3C2400
-
-extern int s3c2400_gpio_getirq(unsigned int pin);
-
-#endif /* CONFIG_CPU_S3C2400 */
-
/* s3c2410_gpio_irqfilter
*
* set the irq filtering on the given pin
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
index a0a89d429296..cac1ad6b582c 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h
@@ -16,11 +16,7 @@
#include <mach/gpio-nrs.h>
-#ifdef CONFIG_CPU_S3C2400
-#define S3C24XX_MISCCR S3C2400_MISCCR
-#else
#define S3C24XX_MISCCR S3C24XX_GPIOREG2(0x80)
-#endif /* CONFIG_CPU_S3C2400 */
/* general configuration options */
@@ -42,67 +38,33 @@
/* configure GPIO ports A..G */
/* port A - S3C2410: 22bits, zero in bit X makes pin X output
- * S3C2400: 18bits, zero in bit X makes pin X output
* 1 makes port special function, this is default
*/
#define S3C2410_GPACON S3C2410_GPIOREG(0x00)
#define S3C2410_GPADAT S3C2410_GPIOREG(0x04)
-#define S3C2400_GPACON S3C2410_GPIOREG(0x00)
-#define S3C2400_GPADAT S3C2410_GPIOREG(0x04)
-
#define S3C2410_GPA0_ADDR0 (1<<0)
-
#define S3C2410_GPA1_ADDR16 (1<<1)
-
#define S3C2410_GPA2_ADDR17 (1<<2)
-
#define S3C2410_GPA3_ADDR18 (1<<3)
-
#define S3C2410_GPA4_ADDR19 (1<<4)
-
#define S3C2410_GPA5_ADDR20 (1<<5)
-
#define S3C2410_GPA6_ADDR21 (1<<6)
-
#define S3C2410_GPA7_ADDR22 (1<<7)
-
#define S3C2410_GPA8_ADDR23 (1<<8)
-
#define S3C2410_GPA9_ADDR24 (1<<9)
-
#define S3C2410_GPA10_ADDR25 (1<<10)
-#define S3C2400_GPA10_SCKE (1<<10)
-
#define S3C2410_GPA11_ADDR26 (1<<11)
-#define S3C2400_GPA11_nCAS0 (1<<11)
-
#define S3C2410_GPA12_nGCS1 (1<<12)
-#define S3C2400_GPA12_nCAS1 (1<<12)
-
#define S3C2410_GPA13_nGCS2 (1<<13)
-#define S3C2400_GPA13_nGCS1 (1<<13)
-
#define S3C2410_GPA14_nGCS3 (1<<14)
-#define S3C2400_GPA14_nGCS2 (1<<14)
-
#define S3C2410_GPA15_nGCS4 (1<<15)
-#define S3C2400_GPA15_nGCS3 (1<<15)
-
#define S3C2410_GPA16_nGCS5 (1<<16)
-#define S3C2400_GPA16_nGCS4 (1<<16)
-
#define S3C2410_GPA17_CLE (1<<17)
-#define S3C2400_GPA17_nGCS5 (1<<17)
-
#define S3C2410_GPA18_ALE (1<<18)
-
#define S3C2410_GPA19_nFWE (1<<19)
-
#define S3C2410_GPA20_nFRE (1<<20)
-
#define S3C2410_GPA21_nRSTOUT (1<<21)
-
#define S3C2410_GPA22_nFCE (1<<22)
/* 0x08 and 0x0c are reserved on S3C2410 */
@@ -111,10 +73,6 @@
* GPB is 10 IO pins, each configured by 2 bits each in GPBCON.
* 00 = input, 01 = output, 10=special function, 11=reserved
- * S3C2400:
- * GPB is 16 IO pins, each configured by 2 bits each in GPBCON.
- * 00 = input, 01 = output, 10=data, 11=special function
-
* bit 0,1 = pin 0, 2,3= pin 1...
*
* CPBUP = pull up resistor control, 1=disabled, 0=enabled
@@ -124,78 +82,35 @@
#define S3C2410_GPBDAT S3C2410_GPIOREG(0x14)
#define S3C2410_GPBUP S3C2410_GPIOREG(0x18)
-#define S3C2400_GPBCON S3C2410_GPIOREG(0x08)
-#define S3C2400_GPBDAT S3C2410_GPIOREG(0x0C)
-#define S3C2400_GPBUP S3C2410_GPIOREG(0x10)
-
/* no i/o pin in port b can have value 3 (unless it is a s3c2443) ! */
#define S3C2410_GPB0_TOUT0 (0x02 << 0)
-#define S3C2400_GPB0_DATA16 (0x02 << 0)
#define S3C2410_GPB1_TOUT1 (0x02 << 2)
-#define S3C2400_GPB1_DATA17 (0x02 << 2)
#define S3C2410_GPB2_TOUT2 (0x02 << 4)
-#define S3C2400_GPB2_DATA18 (0x02 << 4)
-#define S3C2400_GPB2_TCLK1 (0x03 << 4)
#define S3C2410_GPB3_TOUT3 (0x02 << 6)
-#define S3C2400_GPB3_DATA19 (0x02 << 6)
-#define S3C2400_GPB3_TXD1 (0x03 << 6)
#define S3C2410_GPB4_TCLK0 (0x02 << 8)
-#define S3C2400_GPB4_DATA20 (0x02 << 8)
#define S3C2410_GPB4_MASK (0x03 << 8)
-#define S3C2400_GPB4_RXD1 (0x03 << 8)
-#define S3C2400_GPB4_MASK (0x03 << 8)
#define S3C2410_GPB5_nXBACK (0x02 << 10)
#define S3C2443_GPB5_XBACK (0x03 << 10)
-#define S3C2400_GPB5_DATA21 (0x02 << 10)
-#define S3C2400_GPB5_nCTS1 (0x03 << 10)
#define S3C2410_GPB6_nXBREQ (0x02 << 12)
#define S3C2443_GPB6_XBREQ (0x03 << 12)
-#define S3C2400_GPB6_DATA22 (0x02 << 12)
-#define S3C2400_GPB6_nRTS1 (0x03 << 12)
#define S3C2410_GPB7_nXDACK1 (0x02 << 14)
#define S3C2443_GPB7_XDACK1 (0x03 << 14)
-#define S3C2400_GPB7_DATA23 (0x02 << 14)
#define S3C2410_GPB8_nXDREQ1 (0x02 << 16)
-#define S3C2400_GPB8_DATA24 (0x02 << 16)
#define S3C2410_GPB9_nXDACK0 (0x02 << 18)
#define S3C2443_GPB9_XDACK0 (0x03 << 18)
-#define S3C2400_GPB9_DATA25 (0x02 << 18)
-#define S3C2400_GPB9_I2SSDI (0x03 << 18)
#define S3C2410_GPB10_nXDRE0 (0x02 << 20)
#define S3C2443_GPB10_XDREQ0 (0x03 << 20)
-#define S3C2400_GPB10_DATA26 (0x02 << 20)
-#define S3C2400_GPB10_nSS (0x03 << 20)
-
-#define S3C2400_GPB11_INP (0x00 << 22)
-#define S3C2400_GPB11_OUTP (0x01 << 22)
-#define S3C2400_GPB11_DATA27 (0x02 << 22)
-
-#define S3C2400_GPB12_INP (0x00 << 24)
-#define S3C2400_GPB12_OUTP (0x01 << 24)
-#define S3C2400_GPB12_DATA28 (0x02 << 24)
-
-#define S3C2400_GPB13_INP (0x00 << 26)
-#define S3C2400_GPB13_OUTP (0x01 << 26)
-#define S3C2400_GPB13_DATA29 (0x02 << 26)
-
-#define S3C2400_GPB14_INP (0x00 << 28)
-#define S3C2400_GPB14_OUTP (0x01 << 28)
-#define S3C2400_GPB14_DATA30 (0x02 << 28)
-
-#define S3C2400_GPB15_INP (0x00 << 30)
-#define S3C2400_GPB15_OUTP (0x01 << 30)
-#define S3C2400_GPB15_DATA31 (0x02 << 30)
#define S3C2410_GPB_PUPDIS(x) (1<<(x))
@@ -208,59 +123,22 @@
#define S3C2410_GPCCON S3C2410_GPIOREG(0x20)
#define S3C2410_GPCDAT S3C2410_GPIOREG(0x24)
#define S3C2410_GPCUP S3C2410_GPIOREG(0x28)
-
-#define S3C2400_GPCCON S3C2410_GPIOREG(0x14)
-#define S3C2400_GPCDAT S3C2410_GPIOREG(0x18)
-#define S3C2400_GPCUP S3C2410_GPIOREG(0x1C)
-
#define S3C2410_GPC0_LEND (0x02 << 0)
-#define S3C2400_GPC0_VD0 (0x02 << 0)
-
#define S3C2410_GPC1_VCLK (0x02 << 2)
-#define S3C2400_GPC1_VD1 (0x02 << 2)
-
#define S3C2410_GPC2_VLINE (0x02 << 4)
-#define S3C2400_GPC2_VD2 (0x02 << 4)
-
#define S3C2410_GPC3_VFRAME (0x02 << 6)
-#define S3C2400_GPC3_VD3 (0x02 << 6)
-
#define S3C2410_GPC4_VM (0x02 << 8)
-#define S3C2400_GPC4_VD4 (0x02 << 8)
-
#define S3C2410_GPC5_LCDVF0 (0x02 << 10)
-#define S3C2400_GPC5_VD5 (0x02 << 10)
-
#define S3C2410_GPC6_LCDVF1 (0x02 << 12)
-#define S3C2400_GPC6_VD6 (0x02 << 12)
-
#define S3C2410_GPC7_LCDVF2 (0x02 << 14)
-#define S3C2400_GPC7_VD7 (0x02 << 14)
-
#define S3C2410_GPC8_VD0 (0x02 << 16)
-#define S3C2400_GPC8_VD8 (0x02 << 16)
-
#define S3C2410_GPC9_VD1 (0x02 << 18)
-#define S3C2400_GPC9_VD9 (0x02 << 18)
-
#define S3C2410_GPC10_VD2 (0x02 << 20)
-#define S3C2400_GPC10_VD10 (0x02 << 20)
-
#define S3C2410_GPC11_VD3 (0x02 << 22)
-#define S3C2400_GPC11_VD11 (0x02 << 22)
-
#define S3C2410_GPC12_VD4 (0x02 << 24)
-#define S3C2400_GPC12_VD12 (0x02 << 24)
-
#define S3C2410_GPC13_VD5 (0x02 << 26)
-#define S3C2400_GPC13_VD13 (0x02 << 26)
-
#define S3C2410_GPC14_VD6 (0x02 << 28)
-#define S3C2400_GPC14_VD14 (0x02 << 28)
-
#define S3C2410_GPC15_VD7 (0x02 << 30)
-#define S3C2400_GPC15_VD15 (0x02 << 30)
-
#define S3C2410_GPC_PUPDIS(x) (1<<(x))
/*
@@ -269,8 +147,6 @@
* almost identical setup to port b, but the special functions are mostly
* to do with the video system's data.
*
- * S3C2400: Port D consists of 11 GPIO/Special function
- *
* almost identical setup to port c
*/
@@ -278,46 +154,31 @@
#define S3C2410_GPDDAT S3C2410_GPIOREG(0x34)
#define S3C2410_GPDUP S3C2410_GPIOREG(0x38)
-#define S3C2400_GPDCON S3C2410_GPIOREG(0x20)
-#define S3C2400_GPDDAT S3C2410_GPIOREG(0x24)
-#define S3C2400_GPDUP S3C2410_GPIOREG(0x28)
-
#define S3C2410_GPD0_VD8 (0x02 << 0)
-#define S3C2400_GPD0_VFRAME (0x02 << 0)
#define S3C2442_GPD0_nSPICS1 (0x03 << 0)
#define S3C2410_GPD1_VD9 (0x02 << 2)
-#define S3C2400_GPD1_VM (0x02 << 2)
#define S3C2442_GPD1_SPICLK1 (0x03 << 2)
#define S3C2410_GPD2_VD10 (0x02 << 4)
-#define S3C2400_GPD2_VLINE (0x02 << 4)
#define S3C2410_GPD3_VD11 (0x02 << 6)
-#define S3C2400_GPD3_VCLK (0x02 << 6)
#define S3C2410_GPD4_VD12 (0x02 << 8)
-#define S3C2400_GPD4_LEND (0x02 << 8)
#define S3C2410_GPD5_VD13 (0x02 << 10)
-#define S3C2400_GPD5_TOUT0 (0x02 << 10)
#define S3C2410_GPD6_VD14 (0x02 << 12)
-#define S3C2400_GPD6_TOUT1 (0x02 << 12)
#define S3C2410_GPD7_VD15 (0x02 << 14)
-#define S3C2400_GPD7_TOUT2 (0x02 << 14)
#define S3C2410_GPD8_VD16 (0x02 << 16)
-#define S3C2400_GPD8_TOUT3 (0x02 << 16)
#define S3C2440_GPD8_SPIMISO1 (0x03 << 16)
#define S3C2410_GPD9_VD17 (0x02 << 18)
-#define S3C2400_GPD9_TCLK0 (0x02 << 18)
#define S3C2440_GPD9_SPIMOSI1 (0x03 << 18)
#define S3C2410_GPD10_VD18 (0x02 << 20)
-#define S3C2400_GPD10_nWAIT (0x02 << 20)
#define S3C2440_GPD10_SPICLK1 (0x03 << 20)
#define S3C2410_GPD11_VD19 (0x02 << 22)
@@ -340,9 +201,6 @@
* again, the same as port B, but dealing with I2S, SDI, and
* more miscellaneous functions
*
- * S3C2400:
- * Port E consists of 12 GPIO/Special function
- *
* GPIO / interrupt inputs
*/
@@ -350,74 +208,51 @@
#define S3C2410_GPEDAT S3C2410_GPIOREG(0x44)
#define S3C2410_GPEUP S3C2410_GPIOREG(0x48)
-#define S3C2400_GPECON S3C2410_GPIOREG(0x2C)
-#define S3C2400_GPEDAT S3C2410_GPIOREG(0x30)
-#define S3C2400_GPEUP S3C2410_GPIOREG(0x34)
-
#define S3C2410_GPE0_I2SLRCK (0x02 << 0)
#define S3C2443_GPE0_AC_nRESET (0x03 << 0)
-#define S3C2400_GPE0_EINT0 (0x02 << 0)
#define S3C2410_GPE0_MASK (0x03 << 0)
#define S3C2410_GPE1_I2SSCLK (0x02 << 2)
#define S3C2443_GPE1_AC_SYNC (0x03 << 2)
-#define S3C2400_GPE1_EINT1 (0x02 << 2)
-#define S3C2400_GPE1_nSS (0x03 << 2)
#define S3C2410_GPE1_MASK (0x03 << 2)
#define S3C2410_GPE2_CDCLK (0x02 << 4)
#define S3C2443_GPE2_AC_BITCLK (0x03 << 4)
-#define S3C2400_GPE2_EINT2 (0x02 << 4)
-#define S3C2400_GPE2_I2SSDI (0x03 << 4)
#define S3C2410_GPE3_I2SSDI (0x02 << 6)
#define S3C2443_GPE3_AC_SDI (0x03 << 6)
-#define S3C2400_GPE3_EINT3 (0x02 << 6)
-#define S3C2400_GPE3_nCTS1 (0x03 << 6)
#define S3C2410_GPE3_nSS0 (0x03 << 6)
#define S3C2410_GPE3_MASK (0x03 << 6)
#define S3C2410_GPE4_I2SSDO (0x02 << 8)
#define S3C2443_GPE4_AC_SDO (0x03 << 8)
-#define S3C2400_GPE4_EINT4 (0x02 << 8)
-#define S3C2400_GPE4_nRTS1 (0x03 << 8)
#define S3C2410_GPE4_I2SSDI (0x03 << 8)
#define S3C2410_GPE4_MASK (0x03 << 8)
#define S3C2410_GPE5_SDCLK (0x02 << 10)
#define S3C2443_GPE5_SD1_CLK (0x02 << 10)
-#define S3C2400_GPE5_EINT5 (0x02 << 10)
-#define S3C2400_GPE5_TCLK1 (0x03 << 10)
#define S3C2443_GPE5_AC_BITCLK (0x03 << 10)
#define S3C2410_GPE6_SDCMD (0x02 << 12)
#define S3C2443_GPE6_SD1_CMD (0x02 << 12)
#define S3C2443_GPE6_AC_SDI (0x03 << 12)
-#define S3C2400_GPE6_EINT6 (0x02 << 12)
#define S3C2410_GPE7_SDDAT0 (0x02 << 14)
#define S3C2443_GPE5_SD1_DAT0 (0x02 << 14)
#define S3C2443_GPE7_AC_SDO (0x03 << 14)
-#define S3C2400_GPE7_EINT7 (0x02 << 14)
#define S3C2410_GPE8_SDDAT1 (0x02 << 16)
#define S3C2443_GPE8_SD1_DAT1 (0x02 << 16)
#define S3C2443_GPE8_AC_SYNC (0x03 << 16)
-#define S3C2400_GPE8_nXDACK0 (0x02 << 16)
#define S3C2410_GPE9_SDDAT2 (0x02 << 18)
#define S3C2443_GPE9_SD1_DAT2 (0x02 << 18)
#define S3C2443_GPE9_AC_nRESET (0x03 << 18)
-#define S3C2400_GPE9_nXDACK1 (0x02 << 18)
-#define S3C2400_GPE9_nXBACK (0x03 << 18)
#define S3C2410_GPE10_SDDAT3 (0x02 << 20)
#define S3C2443_GPE10_SD1_DAT3 (0x02 << 20)
-#define S3C2400_GPE10_nXDREQ0 (0x02 << 20)
#define S3C2410_GPE11_SPIMISO0 (0x02 << 22)
-#define S3C2400_GPE11_nXDREQ1 (0x02 << 22)
-#define S3C2400_GPE11_nXBREQ (0x03 << 22)
#define S3C2410_GPE12_SPIMOSI0 (0x02 << 24)
@@ -447,9 +282,6 @@
*
* pull up works like all other ports.
*
- * S3C2400:
- * Port F consists of 7 GPIO/Special function
- *
* GPIO/serial/misc pins
*/
@@ -457,37 +289,14 @@
#define S3C2410_GPFDAT S3C2410_GPIOREG(0x54)
#define S3C2410_GPFUP S3C2410_GPIOREG(0x58)
-#define S3C2400_GPFCON S3C2410_GPIOREG(0x38)
-#define S3C2400_GPFDAT S3C2410_GPIOREG(0x3C)
-#define S3C2400_GPFUP S3C2410_GPIOREG(0x40)
-
#define S3C2410_GPF0_EINT0 (0x02 << 0)
-#define S3C2400_GPF0_RXD0 (0x02 << 0)
-
#define S3C2410_GPF1_EINT1 (0x02 << 2)
-#define S3C2400_GPF1_RXD1 (0x02 << 2)
-#define S3C2400_GPF1_IICSDA (0x03 << 2)
-
#define S3C2410_GPF2_EINT2 (0x02 << 4)
-#define S3C2400_GPF2_TXD0 (0x02 << 4)
-
#define S3C2410_GPF3_EINT3 (0x02 << 6)
-#define S3C2400_GPF3_TXD1 (0x02 << 6)
-#define S3C2400_GPF3_IICSCL (0x03 << 6)
-
#define S3C2410_GPF4_EINT4 (0x02 << 8)
-#define S3C2400_GPF4_nRTS0 (0x02 << 8)
-#define S3C2400_GPF4_nXBACK (0x03 << 8)
-
#define S3C2410_GPF5_EINT5 (0x02 << 10)
-#define S3C2400_GPF5_nCTS0 (0x02 << 10)
-#define S3C2400_GPF5_nXBREQ (0x03 << 10)
-
#define S3C2410_GPF6_EINT6 (0x02 << 12)
-#define S3C2400_GPF6_CLKOUT (0x02 << 12)
-
#define S3C2410_GPF7_EINT7 (0x02 << 14)
-
#define S3C2410_GPF_PUPDIS(x) (1<<(x))
/* S3C2410:
@@ -497,62 +306,38 @@
* 00 = 0 input, 1 output, 2 interrupt (EINT0..7), 3 special func
*
* pull up works like all other ports.
- *
- * S3C2400:
- * Port G consists of 10 GPIO/Special function
*/
#define S3C2410_GPGCON S3C2410_GPIOREG(0x60)
#define S3C2410_GPGDAT S3C2410_GPIOREG(0x64)
#define S3C2410_GPGUP S3C2410_GPIOREG(0x68)
-#define S3C2400_GPGCON S3C2410_GPIOREG(0x44)
-#define S3C2400_GPGDAT S3C2410_GPIOREG(0x48)
-#define S3C2400_GPGUP S3C2410_GPIOREG(0x4C)
-
#define S3C2410_GPG0_EINT8 (0x02 << 0)
-#define S3C2400_GPG0_I2SLRCK (0x02 << 0)
#define S3C2410_GPG1_EINT9 (0x02 << 2)
-#define S3C2400_GPG1_I2SSCLK (0x02 << 2)
#define S3C2410_GPG2_EINT10 (0x02 << 4)
#define S3C2410_GPG2_nSS0 (0x03 << 4)
-#define S3C2400_GPG2_CDCLK (0x02 << 4)
#define S3C2410_GPG3_EINT11 (0x02 << 6)
#define S3C2410_GPG3_nSS1 (0x03 << 6)
-#define S3C2400_GPG3_I2SSDO (0x02 << 6)
-#define S3C2400_GPG3_I2SSDI (0x03 << 6)
#define S3C2410_GPG4_EINT12 (0x02 << 8)
-#define S3C2400_GPG4_MMCCLK (0x02 << 8)
-#define S3C2400_GPG4_I2SSDI (0x03 << 8)
#define S3C2410_GPG4_LCDPWREN (0x03 << 8)
#define S3C2443_GPG4_LCDPWRDN (0x03 << 8)
#define S3C2410_GPG5_EINT13 (0x02 << 10)
-#define S3C2400_GPG5_MMCCMD (0x02 << 10)
-#define S3C2400_GPG5_IICSDA (0x03 << 10)
#define S3C2410_GPG5_SPIMISO1 (0x03 << 10) /* not s3c2443 */
#define S3C2410_GPG6_EINT14 (0x02 << 12)
-#define S3C2400_GPG6_MMCDAT (0x02 << 12)
-#define S3C2400_GPG6_IICSCL (0x03 << 12)
#define S3C2410_GPG6_SPIMOSI1 (0x03 << 12)
#define S3C2410_GPG7_EINT15 (0x02 << 14)
#define S3C2410_GPG7_SPICLK1 (0x03 << 14)
-#define S3C2400_GPG7_SPIMISO (0x02 << 14)
-#define S3C2400_GPG7_IICSDA (0x03 << 14)
#define S3C2410_GPG8_EINT16 (0x02 << 16)
-#define S3C2400_GPG8_SPIMOSI (0x02 << 16)
-#define S3C2400_GPG8_IICSCL (0x03 << 16)
#define S3C2410_GPG9_EINT17 (0x02 << 18)
-#define S3C2400_GPG9_SPICLK (0x02 << 18)
-#define S3C2400_GPG9_MMCCLK (0x03 << 18)
#define S3C2410_GPG10_EINT18 (0x02 << 20)
@@ -660,7 +445,6 @@
#define S3C2443_GPMUP S3C2410_GPIOREG(0x108)
/* miscellaneous control */
-#define S3C2400_MISCCR S3C2410_GPIOREG(0x54)
#define S3C2410_MISCCR S3C2410_GPIOREG(0x80)
#define S3C2410_DCLKCON S3C2410_GPIOREG(0x84)
@@ -674,14 +458,6 @@
#define S3C2410_MISCCR_SPUCR_LEN (0<<1)
#define S3C2410_MISCCR_SPUCR_LDIS (1<<1)
-#define S3C2400_MISCCR_SPUCR_LEN (0<<0)
-#define S3C2400_MISCCR_SPUCR_LDIS (1<<0)
-#define S3C2400_MISCCR_SPUCR_HEN (0<<1)
-#define S3C2400_MISCCR_SPUCR_HDIS (1<<1)
-
-#define S3C2400_MISCCR_HZ_STOPEN (0<<2)
-#define S3C2400_MISCCR_HZ_STOPPREV (1<<2)
-
#define S3C2410_MISCCR_USBDEV (0<<3)
#define S3C2410_MISCCR_USBHOST (1<<3)
@@ -728,7 +504,6 @@
*
* Samsung datasheet p9-25
*/
-#define S3C2400_EXTINT0 S3C2410_GPIOREG(0x58)
#define S3C2410_EXTINT0 S3C2410_GPIOREG(0x88)
#define S3C2410_EXTINT1 S3C2410_GPIOREG(0x8C)
#define S3C2410_EXTINT2 S3C2410_GPIOREG(0x90)
@@ -796,22 +571,6 @@
#define S3C2410_GSTATUS2_OFFRESET (1<<1)
#define S3C2410_GSTATUS2_PONRESET (1<<0)
-/* open drain control register */
-#define S3C2400_OPENCR S3C2410_GPIOREG(0x50)
-
-#define S3C2400_OPENCR_OPC_RXD1DIS (0<<0)
-#define S3C2400_OPENCR_OPC_RXD1EN (1<<0)
-#define S3C2400_OPENCR_OPC_TXD1DIS (0<<1)
-#define S3C2400_OPENCR_OPC_TXD1EN (1<<1)
-#define S3C2400_OPENCR_OPC_CMDDIS (0<<2)
-#define S3C2400_OPENCR_OPC_CMDEN (1<<2)
-#define S3C2400_OPENCR_OPC_DATDIS (0<<3)
-#define S3C2400_OPENCR_OPC_DATEN (1<<3)
-#define S3C2400_OPENCR_OPC_MISODIS (0<<4)
-#define S3C2400_OPENCR_OPC_MISOEN (1<<4)
-#define S3C2400_OPENCR_OPC_MOSIDIS (0<<5)
-#define S3C2400_OPENCR_OPC_MOSIEN (1<<5)
-
/* 2412/2413 sleep configuration registers */
#define S3C2412_GPBSLPCON S3C2410_GPIOREG(0x1C)
diff --git a/arch/arm/mach-s3c2410/include/mach/regs-mem.h b/arch/arm/mach-s3c2410/include/mach/regs-mem.h
index 988a6863e54b..e0c67b0163d8 100644
--- a/arch/arm/mach-s3c2410/include/mach/regs-mem.h
+++ b/arch/arm/mach-s3c2410/include/mach/regs-mem.h
@@ -145,29 +145,8 @@
#define S3C2410_BANKCON_Tacs_SHIFT (13)
#define S3C2410_BANKCON_SRAM (0x0 << 15)
-#define S3C2400_BANKCON_EDODRAM (0x2 << 15)
#define S3C2410_BANKCON_SDRAM (0x3 << 15)
-/* next bits only for EDO DRAM in 6,7 */
-#define S3C2400_BANKCON_EDO_Trcd1 (0x00 << 4)
-#define S3C2400_BANKCON_EDO_Trcd2 (0x01 << 4)
-#define S3C2400_BANKCON_EDO_Trcd3 (0x02 << 4)
-#define S3C2400_BANKCON_EDO_Trcd4 (0x03 << 4)
-
-/* CAS pulse width */
-#define S3C2400_BANKCON_EDO_PULSE1 (0x00 << 3)
-#define S3C2400_BANKCON_EDO_PULSE2 (0x01 << 3)
-
-/* CAS pre-charge */
-#define S3C2400_BANKCON_EDO_TCP1 (0x00 << 2)
-#define S3C2400_BANKCON_EDO_TCP2 (0x01 << 2)
-
-/* control column address select */
-#define S3C2400_BANKCON_EDO_SCANb8 (0x00 << 0)
-#define S3C2400_BANKCON_EDO_SCANb9 (0x01 << 0)
-#define S3C2400_BANKCON_EDO_SCANb10 (0x02 << 0)
-#define S3C2400_BANKCON_EDO_SCANb11 (0x03 << 0)
-
/* next bits only for SDRAM in 6,7 */
#define S3C2410_BANKCON_Trcd2 (0x00 << 2)
#define S3C2410_BANKCON_Trcd3 (0x01 << 2)
@@ -194,12 +173,6 @@
#define S3C2410_REFRESH_TRP_3clk (1<<20)
#define S3C2410_REFRESH_TRP_4clk (2<<20)
-#define S3C2400_REFRESH_DRAM_TRP_MASK (3<<20)
-#define S3C2400_REFRESH_DRAM_TRP_1_5clk (0<<20)
-#define S3C2400_REFRESH_DRAM_TRP_2_5clk (1<<20)
-#define S3C2400_REFRESH_DRAM_TRP_3_5clk (2<<20)
-#define S3C2400_REFRESH_DRAM_TRP_4_5clk (3<<20)
-
#define S3C2410_REFRESH_TSRC_MASK (3<<18)
#define S3C2410_REFRESH_TSRC_4clk (0<<18)
#define S3C2410_REFRESH_TSRC_5clk (1<<18)
@@ -222,7 +195,6 @@
#define S3C2410_BANKSIZE_4M (0x5 << 0)
#define S3C2410_BANKSIZE_2M (0x4 << 0)
#define S3C2410_BANKSIZE_MASK (0x7 << 0)
-#define S3C2400_BANKSIZE_MASK (0x4 << 0)
#define S3C2410_BANKSIZE_SCLK_EN (1<<4)
#define S3C2410_BANKSIZE_SCKE_EN (1<<5)
#define S3C2410_BANKSIZE_BURST (1<<7)
diff --git a/arch/arm/mach-s3c2412/Kconfig b/arch/arm/mach-s3c2412/Kconfig
index e82ab4aa7ab9..c2cf4e569989 100644
--- a/arch/arm/mach-s3c2412/Kconfig
+++ b/arch/arm/mach-s3c2412/Kconfig
@@ -15,7 +15,7 @@ config CPU_S3C2412
config CPU_S3C2412_ONLY
bool
- depends on ARCH_S3C2410 && !CPU_S3C2400 && !CPU_S3C2410 && \
+ depends on ARCH_S3C2410 && !CPU_S3C2410 && \
!CPU_S3C2416 && !CPU_S3C2440 && !CPU_S3C2442 && \
!CPU_S3C2443 && CPU_S3C2412
default y if CPU_S3C2412
diff --git a/arch/arm/mach-s3c2412/clock.c b/arch/arm/mach-s3c2412/clock.c
index 0c0505b025cb..140711db6c89 100644
--- a/arch/arm/mach-s3c2412/clock.c
+++ b/arch/arm/mach-s3c2412/clock.c
@@ -95,12 +95,10 @@ static int s3c2412_upll_enable(struct clk *clk, int enable)
static struct clk clk_erefclk = {
.name = "erefclk",
- .id = -1,
};
static struct clk clk_urefclk = {
.name = "urefclk",
- .id = -1,
};
static int s3c2412_setparent_usysclk(struct clk *clk, struct clk *parent)
@@ -122,7 +120,6 @@ static int s3c2412_setparent_usysclk(struct clk *clk, struct clk *parent)
static struct clk clk_usysclk = {
.name = "usysclk",
- .id = -1,
.parent = &clk_xtal,
.ops = &(struct clk_ops) {
.set_parent = s3c2412_setparent_usysclk,
@@ -132,13 +129,11 @@ static struct clk clk_usysclk = {
static struct clk clk_mrefclk = {
.name = "mrefclk",
.parent = &clk_xtal,
- .id = -1,
};
static struct clk clk_mdivclk = {
.name = "mdivclk",
.parent = &clk_xtal,
- .id = -1,
};
static int s3c2412_setparent_usbsrc(struct clk *clk, struct clk *parent)
@@ -200,7 +195,6 @@ static int s3c2412_setrate_usbsrc(struct clk *clk, unsigned long rate)
static struct clk clk_usbsrc = {
.name = "usbsrc",
- .id = -1,
.ops = &(struct clk_ops) {
.get_rate = s3c2412_getrate_usbsrc,
.set_rate = s3c2412_setrate_usbsrc,
@@ -228,7 +222,6 @@ static int s3c2412_setparent_msysclk(struct clk *clk, struct clk *parent)
static struct clk clk_msysclk = {
.name = "msysclk",
- .id = -1,
.ops = &(struct clk_ops) {
.set_parent = s3c2412_setparent_msysclk,
},
@@ -268,7 +261,6 @@ static int s3c2412_setparent_armclk(struct clk *clk, struct clk *parent)
static struct clk clk_armclk = {
.name = "armclk",
- .id = -1,
.parent = &clk_msysclk,
.ops = &(struct clk_ops) {
.set_parent = s3c2412_setparent_armclk,
@@ -344,7 +336,6 @@ static int s3c2412_setrate_uart(struct clk *clk, unsigned long rate)
static struct clk clk_uart = {
.name = "uartclk",
- .id = -1,
.ops = &(struct clk_ops) {
.get_rate = s3c2412_getrate_uart,
.set_rate = s3c2412_setrate_uart,
@@ -397,7 +388,6 @@ static int s3c2412_setrate_i2s(struct clk *clk, unsigned long rate)
static struct clk clk_i2s = {
.name = "i2sclk",
- .id = -1,
.ops = &(struct clk_ops) {
.get_rate = s3c2412_getrate_i2s,
.set_rate = s3c2412_setrate_i2s,
@@ -449,7 +439,6 @@ static int s3c2412_setrate_cam(struct clk *clk, unsigned long rate)
static struct clk clk_cam = {
.name = "camif-upll", /* same as 2440 name */
- .id = -1,
.ops = &(struct clk_ops) {
.get_rate = s3c2412_getrate_cam,
.set_rate = s3c2412_setrate_cam,
@@ -463,37 +452,31 @@ static struct clk clk_cam = {
static struct clk init_clocks_disable[] = {
{
.name = "nand",
- .id = -1,
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_NAND,
}, {
.name = "sdi",
- .id = -1,
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_SDI,
}, {
.name = "adc",
- .id = -1,
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_ADC,
}, {
.name = "i2c",
- .id = -1,
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_IIC,
}, {
.name = "iis",
- .id = -1,
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_IIS,
}, {
.name = "spi",
- .id = -1,
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_SPI,
@@ -503,96 +486,83 @@ static struct clk init_clocks_disable[] = {
static struct clk init_clocks[] = {
{
.name = "dma",
- .id = 0,
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_DMA0,
}, {
.name = "dma",
- .id = 1,
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_DMA1,
}, {
.name = "dma",
- .id = 2,
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_DMA2,
}, {
.name = "dma",
- .id = 3,
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_DMA3,
}, {
.name = "lcd",
- .id = -1,
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_LCDC,
}, {
.name = "gpio",
- .id = -1,
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_GPIO,
}, {
.name = "usb-host",
- .id = -1,
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_USBH,
}, {
.name = "usb-device",
- .id = -1,
.parent = &clk_h,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_USBD,
}, {
.name = "timers",
- .id = -1,
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_PWMT,
}, {
.name = "uart",
- .id = 0,
+ .devname = "s3c2412-uart.0",
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_UART0,
}, {
.name = "uart",
- .id = 1,
+ .devname = "s3c2412-uart.1",
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_UART1,
}, {
.name = "uart",
- .id = 2,
+ .devname = "s3c2412-uart.2",
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_UART2,
}, {
.name = "rtc",
- .id = -1,
.parent = &clk_p,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_RTC,
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_p,
.ctrlbit = 0,
}, {
.name = "usb-bus-gadget",
- .id = -1,
.parent = &clk_usb_bus,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_USB_DEV48,
}, {
.name = "usb-bus-host",
- .id = -1,
.parent = &clk_usb_bus,
.enable = s3c2412_clkcon_enable,
.ctrlbit = S3C2412_CLKCON_USB_HOST48,
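
The clock tables above drop the numeric .id field in favour of .devname, so a clock is matched to its consumer by device name (for example the "uart" clock of "s3c2412-uart.0") rather than by an index. The driver side is unchanged: the consumer still asks clk_get() for the connection name, and the platform clock code picks the entry whose .devname matches the device. A small illustrative probe function (its name and structure are made up for this sketch, not taken from the patch):

        #include <linux/clk.h>
        #include <linux/err.h>
        #include <linux/platform_device.h>

        static int example_probe(struct platform_device *pdev)
        {
                struct clk *clk;

                /* matched against name/devname pairs such as "uart"/"s3c2412-uart.0" */
                clk = clk_get(&pdev->dev, "uart");
                if (IS_ERR(clk))
                        return PTR_ERR(clk);

                clk_enable(clk);                        /* pre-common-clock API of this era */
                platform_set_drvdata(pdev, clk);        /* a real driver keeps the reference */
                return 0;
        }
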
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c
index 752b13a7b3db..f4077efa51fa 100644
--- a/arch/arm/mach-s3c2412/pm.c
+++ b/arch/arm/mach-s3c2412/pm.c
@@ -37,12 +37,10 @@
extern void s3c2412_sleep_enter(void);
-static void s3c2412_cpu_suspend(void)
+static int s3c2412_cpu_suspend(unsigned long arg)
{
unsigned long tmp;
- flush_cache_all();
-
/* set our standby method to sleep */
tmp = __raw_readl(S3C2412_PWRCFG);
@@ -50,6 +48,8 @@ static void s3c2412_cpu_suspend(void)
__raw_writel(tmp, S3C2412_PWRCFG);
s3c2412_sleep_enter();
+
+ panic("sleep resumed to originator?");
}
static void s3c2412_pm_prepare(void)
diff --git a/arch/arm/mach-s3c2416/clock.c b/arch/arm/mach-s3c2416/clock.c
index 3b02d8506e25..21a5e81f0ab5 100644
--- a/arch/arm/mach-s3c2416/clock.c
+++ b/arch/arm/mach-s3c2416/clock.c
@@ -42,7 +42,7 @@ static struct clksrc_clk hsmmc_div[] = {
[0] = {
.clk = {
.name = "hsmmc-div",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2416_CLKDIV2, .size = 2, .shift = 6 },
@@ -50,7 +50,7 @@ static struct clksrc_clk hsmmc_div[] = {
[1] = {
.clk = {
.name = "hsmmc-div",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
@@ -60,8 +60,8 @@ static struct clksrc_clk hsmmc_div[] = {
static struct clksrc_clk hsmmc_mux[] = {
[0] = {
.clk = {
- .id = 0,
.name = "hsmmc-if",
+ .devname = "s3c-sdhci.0",
.ctrlbit = (1 << 6),
.enable = s3c2443_clkcon_enable_s,
},
@@ -76,8 +76,8 @@ static struct clksrc_clk hsmmc_mux[] = {
},
[1] = {
.clk = {
- .id = 1,
.name = "hsmmc-if",
+ .devname = "s3c-sdhci.1",
.ctrlbit = (1 << 12),
.enable = s3c2443_clkcon_enable_s,
},
@@ -94,7 +94,7 @@ static struct clksrc_clk hsmmc_mux[] = {
static struct clk hsmmc0_clk = {
.name = "hsmmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2416_HCLKCON_HSMMC0,
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c
index 41db2b21e213..9ec54f1d8e75 100644
--- a/arch/arm/mach-s3c2416/pm.c
+++ b/arch/arm/mach-s3c2416/pm.c
@@ -24,10 +24,8 @@
extern void s3c2412_sleep_enter(void);
-static void s3c2416_cpu_suspend(void)
+static int s3c2416_cpu_suspend(unsigned long arg)
{
- flush_cache_all();
-
/* enable wakeup sources regardless of battery state */
__raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG);
@@ -35,6 +33,8 @@ static void s3c2416_cpu_suspend(void)
__raw_writel(0x2BED, S3C2443_PWRMODE);
s3c2412_sleep_enter();
+
+ panic("sleep resumed to originator?");
}
static void s3c2416_pm_prepare(void)
diff --git a/arch/arm/mach-s3c2440/clock.c b/arch/arm/mach-s3c2440/clock.c
index 3dc2426e2345..554e0d3ec70b 100644
--- a/arch/arm/mach-s3c2440/clock.c
+++ b/arch/arm/mach-s3c2440/clock.c
@@ -90,14 +90,12 @@ static int s3c2440_camif_upll_setrate(struct clk *clk, unsigned long rate)
static struct clk s3c2440_clk_cam = {
.name = "camif",
- .id = -1,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2440_CLKCON_CAMERA,
};
static struct clk s3c2440_clk_cam_upll = {
.name = "camif-upll",
- .id = -1,
.ops = &(struct clk_ops) {
.set_rate = s3c2440_camif_upll_setrate,
.round_rate = s3c2440_camif_upll_round,
@@ -106,7 +104,6 @@ static struct clk s3c2440_clk_cam_upll = {
static struct clk s3c2440_clk_ac97 = {
.name = "ac97",
- .id = -1,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2440_CLKCON_CAMERA,
};
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index f4ec6d5715c8..a1a7176675b9 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -59,7 +59,6 @@
static struct clk clk_i2s_ext = {
.name = "i2s-ext",
- .id = -1,
};
/* armdiv
@@ -139,7 +138,6 @@ static int s3c2443_armclk_setrate(struct clk *clk, unsigned long rate)
static struct clk clk_armdiv = {
.name = "armdiv",
- .id = -1,
.parent = &clk_msysclk.clk,
.ops = &(struct clk_ops) {
.round_rate = s3c2443_armclk_roundrate,
@@ -160,7 +158,6 @@ static struct clk *clk_arm_sources[] = {
static struct clksrc_clk clk_arm = {
.clk = {
.name = "armclk",
- .id = -1,
},
.sources = &(struct clksrc_sources) {
.sources = clk_arm_sources,
@@ -177,7 +174,6 @@ static struct clksrc_clk clk_arm = {
static struct clksrc_clk clk_hsspi = {
.clk = {
.name = "hsspi",
- .id = -1,
.parent = &clk_esysclk.clk,
.ctrlbit = S3C2443_SCLKCON_HSSPICLK,
.enable = s3c2443_clkcon_enable_s,
@@ -196,7 +192,7 @@ static struct clksrc_clk clk_hsspi = {
static struct clksrc_clk clk_hsmmc_div = {
.clk = {
.name = "hsmmc-div",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
@@ -231,7 +227,7 @@ static int s3c2443_enable_hsmmc(struct clk *clk, int enable)
static struct clk clk_hsmmc = {
.name = "hsmmc-if",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_hsmmc_div.clk,
.enable = s3c2443_enable_hsmmc,
.ops = &(struct clk_ops) {
@@ -248,7 +244,6 @@ static struct clk clk_hsmmc = {
static struct clksrc_clk clk_i2s_eplldiv = {
.clk = {
.name = "i2s-eplldiv",
- .id = -1,
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 12, },
@@ -271,7 +266,6 @@ struct clk *clk_i2s_srclist[] = {
static struct clksrc_clk clk_i2s = {
.clk = {
.name = "i2s-if",
- .id = -1,
.ctrlbit = S3C2443_SCLKCON_I2SCLK,
.enable = s3c2443_clkcon_enable_s,
@@ -288,25 +282,23 @@ static struct clksrc_clk clk_i2s = {
static struct clk init_clocks_off[] = {
{
.name = "sdi",
- .id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_SDI,
}, {
.name = "iis",
- .id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_IIS,
}, {
.name = "spi",
- .id = 0,
+ .devname = "s3c2410-spi.0",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_SPI0,
}, {
.name = "spi",
- .id = 1,
+ .devname = "s3c2410-spi.1",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_SPI1,
diff --git a/arch/arm/mach-s3c24a0/include/mach/debug-macro.S b/arch/arm/mach-s3c24a0/include/mach/debug-macro.S
deleted file mode 100644
index 0c5a73805560..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/debug-macro.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* pull in the relevant register and map files. */
-
-#include <mach/map.h>
-#include <plat/regs-serial.h>
-
- .macro addruart, rp, rv
- ldr \rp, = S3C24XX_PA_UART
- ldr \rv, = S3C24XX_VA_UART
-#if CONFIG_DEBUG_S3C_UART != 0
- add \rp, \rp, #(S3C2410_UART1_OFF * CONFIG_DEBUG_S3C_UART)
- add \rv, \rv, #(S3C2410_UART1_OFF * CONFIG_DEBUG_S3C_UART)
-#endif
- .endm
-
-/* include the reset of the code which will do the work, we're only
- * compiling for a single cpu processor type so the default of s3c2440
- * will be fine with us.
- */
-
-#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-s3c24a0/include/mach/io.h b/arch/arm/mach-s3c24a0/include/mach/io.h
deleted file mode 100644
index 4326c30fabcb..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* arch/arm/mach-s3c24a0/include/mach/io.h
- *
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben-linux@fluff.org>
- *
- * Default IO routines for S3C24A0
- */
-
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-/* No current ISA/PCI bus support. */
-#define __io(a) __typesafe_io(a)
-#define __mem_pci(a) (a)
-
-#define IO_SPACE_LIMIT (0xFFFFFFFF)
-
-#endif
diff --git a/arch/arm/mach-s3c24a0/include/mach/irqs.h b/arch/arm/mach-s3c24a0/include/mach/irqs.h
deleted file mode 100644
index 83ce2a7a9dae..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/irqs.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* linux/arch/arm/mach-s3c24a0/include/mach/irqs.h
- *
- * Copyright (c) 2003-2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-
-#ifndef __ASM_ARCH_24A0_IRQS_H
-#define __ASM_ARCH_24A0_IRQS_H __FILE__
-
-#define IRQ_EINT0t2 S3C2410_IRQ(0) /* 16 */
-/* for generic entry-macro.S */
-#define IRQ_EINT0 IRQ_EINT0t2
-
-#define IRQ_EINT3t6 S3C2410_IRQ(1)
-#define IRQ_EINT7t10 S3C2410_IRQ(2)
-#define IRQ_EINT11t14 S3C2410_IRQ(3)
-#define IRQ_EINT15t18 S3C2410_IRQ(4) /* 20 */
-#define IRQ_TICK S3C2410_IRQ(5)
-#define IRQ_DCTQ S3C2410_IRQ(6)
-#define IRQ_MC S3C2410_IRQ(7)
-#define IRQ_ME S3C2410_IRQ(8) /* 24 */
-#define IRQ_KEYPAD S3C2410_IRQ(9)
-#define IRQ_TIMER0 S3C2410_IRQ(10)
-#define IRQ_TIMER1 S3C2410_IRQ(11)
-#define IRQ_TIMER2 S3C2410_IRQ(12)
-#define IRQ_TIMER3_4 S3C2410_IRQ(13)
-#define IRQ_OS_TIMER IRQ_TIMER3_4
-#define IRQ_LCD S3C2410_IRQ(14)
-#define IRQ_CAM_C S3C2410_IRQ(15)
-#define IRQ_WDT_BATFLT S3C2410_IRQ(16) /* 32 */
-#define IRQ_UART0 S3C2410_IRQ(17)
-#define IRQ_CAM_P S3C2410_IRQ(18)
-#define IRQ_MODEM S3C2410_IRQ(19)
-#define IRQ_DMA S3C2410_IRQ(20)
-#define IRQ_SDI S3C2410_IRQ(21)
-#define IRQ_SPI0 S3C2410_IRQ(22)
-#define IRQ_UART1 S3C2410_IRQ(23)
-#define IRQ_AC97_NFLASH S3C2410_IRQ(24) /* 40 */
-#define IRQ_USBD S3C2410_IRQ(25)
-#define IRQ_USBH S3C2410_IRQ(26)
-#define IRQ_IIC S3C2410_IRQ(27)
-#define IRQ_IRDA_MSTICK S3C2410_IRQ(28) /* 44 */
-#define IRQ_VLX_SPI1 S3C2410_IRQ(29)
-#define IRQ_RTC S3C2410_IRQ(30) /* 46 */
-#define IRQ_ADC_PEN S3C2410_IRQ(31)
-
-/* interrupts generated from the external interrupts sources */
-#define IRQ_EINT00 S3C2410_IRQ(32) /* 48 */
-#define IRQ_EINT1 S3C2410_IRQ(33)
-#define IRQ_EINT2 S3C2410_IRQ(34)
-#define IRQ_EINT3 S3C2410_IRQ(35)
-#define IRQ_EINT4 S3C2410_IRQ(36)
-#define IRQ_EINT5 S3C2410_IRQ(37)
-#define IRQ_EINT6 S3C2410_IRQ(38)
-#define IRQ_EINT7 S3C2410_IRQ(39)
-#define IRQ_EINT8 S3C2410_IRQ(40)
-#define IRQ_EINT9 S3C2410_IRQ(41)
-#define IRQ_EINT10 S3C2410_IRQ(42)
-#define IRQ_EINT11 S3C2410_IRQ(43)
-#define IRQ_EINT12 S3C2410_IRQ(44)
-#define IRQ_EINT13 S3C2410_IRQ(45)
-#define IRQ_EINT14 S3C2410_IRQ(46)
-#define IRQ_EINT15 S3C2410_IRQ(47)
-#define IRQ_EINT16 S3C2410_IRQ(48)
-#define IRQ_EINT17 S3C2410_IRQ(49)
-#define IRQ_EINT18 S3C2410_IRQ(50)
-
-#define IRQ_EINT_BIT(x) ((x) - IRQ_EINT00)
-
-/* SUB IRQS */
-#define IRQ_S3CUART_RX0 S3C2410_IRQ(51) /* 67 */
-#define IRQ_S3CUART_TX0 S3C2410_IRQ(52)
-#define IRQ_S3CUART_ERR0 S3C2410_IRQ(53)
-
-#define IRQ_S3CUART_RX1 S3C2410_IRQ(54)
-#define IRQ_S3CUART_TX1 S3C2410_IRQ(55)
-#define IRQ_S3CUART_ERR1 S3C2410_IRQ(56)
-
-#define IRQ_S3CUART_RX2 (0x0)
-#define IRQ_S3CUART_TX2 (0x0)
-#define IRQ_S3CUART_ERR2 (0x0)
-
-
-#define IRQ_IRDA S3C2410_IRQ(57)
-#define IRQ_MSTICK S3C2410_IRQ(58)
-#define IRQ_RESERVED0 S3C2410_IRQ(59)
-#define IRQ_RESERVED1 S3C2410_IRQ(60)
-#define IRQ_RESERVED2 S3C2410_IRQ(61)
-#define IRQ_TIMER3 S3C2410_IRQ(62)
-#define IRQ_TIMER4 S3C2410_IRQ(63)
-#define IRQ_WDT S3C2410_IRQ(64)
-#define IRQ_BATFLT S3C2410_IRQ(65)
-#define IRQ_POST S3C2410_IRQ(66)
-#define IRQ_DISP_FIFO S3C2410_IRQ(67)
-#define IRQ_PENUP S3C2410_IRQ(68)
-#define IRQ_PENDN S3C2410_IRQ(69)
-#define IRQ_ADC S3C2410_IRQ(70)
-#define IRQ_DISP_FRAME S3C2410_IRQ(71)
-#define IRQ_NFLASH S3C2410_IRQ(72)
-#define IRQ_AC97 S3C2410_IRQ(73)
-#define IRQ_SPI1 S3C2410_IRQ(74)
-#define IRQ_VLX S3C2410_IRQ(75)
-#define IRQ_DMA0 S3C2410_IRQ(76)
-#define IRQ_DMA1 S3C2410_IRQ(77)
-#define IRQ_DMA2 S3C2410_IRQ(78)
-#define IRQ_DMA3 S3C2410_IRQ(79)
-
-#define IRQ_TC (0x0)
-
-#define NR_IRQS (IRQ_DMA3+1)
-
-#endif /* __ASM_ARCH_24A0_IRQS_H */
diff --git a/arch/arm/mach-s3c24a0/include/mach/map.h b/arch/arm/mach-s3c24a0/include/mach/map.h
deleted file mode 100644
index d88c8b24fe34..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/map.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* linux/arch/arm/mach-s3c24a0/include/mach/map.h
- *
- * Copyright 2003-2007 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24A0 - Memory map definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_24A0_MAP_H
-#define __ASM_ARCH_24A0_MAP_H __FILE__
-
-#include <plat/map-base.h>
-#include <plat/map.h>
-
-#define S3C24A0_PA_IO_BASE (0x40000000)
-#define S3C24A0_PA_CLKPWR (0x40000000)
-#define S3C24A0_PA_IRQ (0x40200000)
-#define S3C24A0_PA_DMA (0x40400000)
-#define S3C24A0_PA_MEMCTRL (0x40C00000)
-#define S3C24A0_PA_NAND (0x40C00000)
-#define S3C24A0_PA_SROM (0x40C20000)
-#define S3C24A0_PA_SDRAM (0x40C40000)
-#define S3C24A0_PA_BUSM (0x40CE0000)
-#define S3C24A0_PA_USBHOST (0x41000000)
-#define S3C24A0_PA_MODEMIF (0x41180000)
-#define S3C24A0_PA_IRDA (0x41800000)
-#define S3C24A0_PA_TIMER (0x44000000)
-#define S3C24A0_PA_WATCHDOG (0x44100000)
-#define S3C24A0_PA_RTC (0x44200000)
-#define S3C24A0_PA_UART (0x44400000)
-#define S3C24A0_PA_UART0 (S3C24A0_PA_UART)
-#define S3C24A0_PA_UART1 (S3C24A0_PA_UART + 0x4000)
-#define S3C24A0_PA_SPI (0x44500000)
-#define S3C24A0_PA_IIC (0x44600000)
-#define S3C24A0_PA_IIS (0x44700000)
-#define S3C24A0_PA_GPIO (0x44800000)
-#define S3C24A0_PA_KEYIF (0x44900000)
-#define S3C24A0_PA_USBDEV (0x44A00000)
-#define S3C24A0_PA_AC97 (0x45000000)
-#define S3C24A0_PA_ADC (0x45800000)
-#define S3C24A0_PA_SDI (0x46000000)
-#define S3C24A0_PA_MS (0x46100000)
-#define S3C24A0_PA_LCD (0x4A000000)
-#define S3C24A0_PA_VPOST (0x4A100000)
-
-/* physical addresses of all the chip-select areas */
-
-#define S3C24A0_CS0 (0x00000000)
-#define S3C24A0_CS1 (0x04000000)
-#define S3C24A0_CS2 (0x08000000)
-#define S3C24A0_CS3 (0x0C000000)
-#define S3C24A0_CS4 (0x10000000)
-#define S3C24A0_CS5 (0x40000000)
-
-#define S3C24A0_SDRAM_PA (S3C24A0_CS4)
-
-/* Use a single interface for common resources between S3C24XX cpus */
-
-#define S3C24XX_PA_IRQ S3C24A0_PA_IRQ
-#define S3C24XX_PA_MEMCTRL S3C24A0_PA_MEMCTRL
-#define S3C24XX_PA_USBHOST S3C24A0_PA_USBHOST
-#define S3C24XX_PA_DMA S3C24A0_PA_DMA
-#define S3C24XX_PA_CLKPWR S3C24A0_PA_CLKPWR
-#define S3C24XX_PA_LCD S3C24A0_PA_LCD
-#define S3C24XX_PA_UART S3C24A0_PA_UART
-#define S3C24XX_PA_TIMER S3C24A0_PA_TIMER
-#define S3C24XX_PA_USBDEV S3C24A0_PA_USBDEV
-#define S3C24XX_PA_WATCHDOG S3C24A0_PA_WATCHDOG
-#define S3C24XX_PA_IIS S3C24A0_PA_IIS
-#define S3C24XX_PA_GPIO S3C24A0_PA_GPIO
-#define S3C24XX_PA_RTC S3C24A0_PA_RTC
-#define S3C24XX_PA_ADC S3C24A0_PA_ADC
-#define S3C24XX_PA_SPI S3C24A0_PA_SPI
-#define S3C24XX_PA_SDI S3C24A0_PA_SDI
-#define S3C24XX_PA_NAND S3C24A0_PA_NAND
-
-#define S3C_PA_UART S3C24A0_PA_UART
-#define S3C_PA_IIC S3C24A0_PA_IIC
-#define S3C_PA_NAND S3C24XX_PA_NAND
-
-#endif /* __ASM_ARCH_24A0_MAP_H */
diff --git a/arch/arm/mach-s3c24a0/include/mach/memory.h b/arch/arm/mach-s3c24a0/include/mach/memory.h
deleted file mode 100644
index 7d208a71b172..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/memory.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* linux/arch/arm/mach-s3c24a0/include/mach/memory.h
- * from linux/include/asm-arm/arch-rpc/memory.h
- *
- * Copyright (C) 1996,1997,1998 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_24A0_MEMORY_H
-#define __ASM_ARCH_24A0_MEMORY_H __FILE__
-
-#define PLAT_PHYS_OFFSET UL(0x10000000)
-
-#define __virt_to_bus(x) __virt_to_phys(x)
-#define __bus_to_virt(x) __phys_to_virt(x)
-#define __pfn_to_bus(x) __pfn_to_phys(x)
-#define __bus_to_pfn(x) __phys_to_pfn(x)
-
-#endif
diff --git a/arch/arm/mach-s3c24a0/include/mach/regs-clock.h b/arch/arm/mach-s3c24a0/include/mach/regs-clock.h
deleted file mode 100644
index be0af518b488..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/regs-clock.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* linux/arch/arm/mach-s3c24a0/include/mach/regs-clock.h
- *
- * Copyright (c) 2003-2006 Simtec Electronics <linux@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C24A0 clock register definitions
-*/
-
-#ifndef __ASM_ARCH_24A0_REGS_CLOCK_H
-#define __ASM_ARCH_24A0_REGS_CLOCK_H __FILE__
-
-#define S3C24A0_MPLLCON S3C2410_CLKREG(0x10)
-#define S3C24A0_UPLLCON S3C2410_CLKREG(0x14)
-#define S3C24A0_CLKCON S3C2410_CLKREG(0x20)
-#define S3C24A0_CLKSRC S3C2410_CLKREG(0x24)
-#define S3C24A0_CLKDIVN S3C2410_CLKREG(0x28)
-
-/* CLKCON register bits */
-
-#define S3C24A0_CLKCON_VLX (1<<29)
-#define S3C24A0_CLKCON_VPOST (1<<28)
-#define S3C24A0_CLKCON_WDT (1<<27) /* reserved */
-#define S3C24A0_CLKCON_MPEGDCTQ (1<<26)
-#define S3C24A0_CLKCON_VPOSTIF (1<<25)
-#define S3C24A0_CLKCON_MPEG4IF (1<<24)
-#define S3C24A0_CLKCON_CAM_UPLL (1<<23)
-#define S3C24A0_CLKCON_LCDC (1<<22)
-#define S3C24A0_CLKCON_CAM_HCLK (1<<21)
-#define S3C24A0_CLKCON_MPEG4 (1<<20)
-#define S3C24A0_CLKCON_KEYPAD (1<<19)
-#define S3C24A0_CLKCON_ADC (1<<18)
-#define S3C24A0_CLKCON_SDI (1<<17)
-#define S3C24A0_CLKCON_MS (1<<16) /* memory stick */
-#define S3C24A0_CLKCON_USBD (1<<15)
-#define S3C24A0_CLKCON_GPIO (1<<14)
-#define S3C24A0_CLKCON_IIS (1<<13)
-#define S3C24A0_CLKCON_IIC (1<<12)
-#define S3C24A0_CLKCON_SPI (1<<11)
-#define S3C24A0_CLKCON_UART1 (1<<10)
-#define S3C24A0_CLKCON_UART0 (1<<9)
-#define S3C24A0_CLKCON_PWMT (1<<8)
-#define S3C24A0_CLKCON_USBH (1<<7)
-#define S3C24A0_CLKCON_AC97 (1<<6)
-#define S3C24A0_CLKCON_IrDA (1<<4)
-#define S3C24A0_CLKCON_IDLE (1<<2)
-#define S3C24A0_CLKCON_MON (1<<1)
-#define S3C24A0_CLKCON_STOP (1<<0)
-
-/* CLKSRC register bits */
-
-#define S3C24A0_CLKSRC_OSC (1<<8) /* CLKSRC */
-#define S3C24A0_CLKSRC_UPLL (1<<7)
-#define S3C24A0_CLKSRC_MPLL (1<<5)
-#define S3C24A0_CLKSRC_EXT (1<<4)
-
-/* Use a single interface with the common code, for s3c24xx */
-
-#define S3C2410_MPLLCON S3C24A0_MPLLCON
-#define S3C2410_UPLLCON S3C24A0_UPLLCON
-#define S3C2410_CLKCON S3C24A0_CLKCON
-#define S3C2410_CLKSLOW S3C24A0_CLKSRC
-#define S3C2410_CLKDIVN S3C24A0_CLKDIVN
-
-#define S3C2410_CLKCON_IDLE S3C24A0_CLKCON_IDLE
-#define S3C2410_CLKCON_POWER S3C24A0_CLKCON_STOP
-#define S3C2410_CLKCON_LCDC S3C24A0_CLKCON_LCDC
-#define S3C2410_CLKCON_USBH S3C24A0_CLKCON_USBH
-#define S3C2410_CLKCON_USBD S3C24A0_CLKCON_USBD
-#define S3C2410_CLKCON_PWMT S3C24A0_CLKCON_PWMT
-#define S3C2410_CLKCON_SDI S3C24A0_CLKCON_SDI
-#define S3C2410_CLKCON_UART0 S3C24A0_CLKCON_UART0
-#define S3C2410_CLKCON_UART1 S3C24A0_CLKCON_UART1
-#define S3C2410_CLKCON_GPIO S3C24A0_CLKCON_GPIO
-#define S3C2410_CLKCON_ADC S3C24A0_CLKCON_ADC
-#define S3C2410_CLKCON_IIC S3C24A0_CLKCON_IIC
-#define S3C2410_CLKCON_IIS S3C24A0_CLKCON_IIS
-#define S3C2410_CLKCON_SPI S3C24A0_CLKCON_SPI
-
-#define S3C2410_CLKSLOW_UCLK_OFF S3C24A0_CLKSRC_UPLL
-#define S3C2410_CLKSLOW_MPLL_OFF S3C24A0_CLKSRC_MPLL
-#define S3C2410_CLKSLOW_SLOW (0xFF)
-#define S3C2410_CLKSLOW_GET_SLOWVAL(x) (0x1)
-
-#endif /* __ASM_ARCH_24A0_REGS_CLOCK_H */
diff --git a/arch/arm/mach-s3c24a0/include/mach/regs-irq.h b/arch/arm/mach-s3c24a0/include/mach/regs-irq.h
deleted file mode 100644
index 6086f6f189eb..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/regs-irq.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* linux/arch/arm/mach-s3c24a0/include/mach/regs-irq.h
- *
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-
-#ifndef ___ASM_ARCH_24A0_REGS_IRQ_H
-#define ___ASM_ARCH_24A0_REGS_IRQ_H __FILE__
-
-
-#define S3C2410_EINTMASK S3C2410_EINTREG(0x034)
-#define S3C2410_EINTPEND S3C2410_EINTREG(0X038)
-
-#define S3C24XX_EINTMASK S3C24XX_EINTREG(0x034)
-#define S3C24XX_EINTPEND S3C24XX_EINTREG(0X038)
-
-#endif /* __ASM_ARCH_24A0_REGS_IRQ_H */
-
-
-
diff --git a/arch/arm/mach-s3c24a0/include/mach/system.h b/arch/arm/mach-s3c24a0/include/mach/system.h
deleted file mode 100644
index bd1bd1957656..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/system.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* linux/arch/arm/mach-s3c24a0/include/mach/system.h
- *
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24A0 - System function defines and includes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-#include <mach/map.h>
-
-static void arch_idle(void)
-{
- /* currently no specific idle support. */
-}
-
-void (*s3c24xx_reset_hook)(void);
-
-#include <asm/plat-s3c24xx/system-reset.h>
diff --git a/arch/arm/mach-s3c24a0/include/mach/tick.h b/arch/arm/mach-s3c24a0/include/mach/tick.h
deleted file mode 100644
index 9dea8ba6fb72..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/tick.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* linux/arch/arm/mach-s3c24a0/include/mach/tick.h
- *
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * S3C24A0 - timer tick support
- */
-
-#define SUBSRC_TIMER4 (1 << (IRQ_TIMER4 - IRQ_S3CUART_RX0))
-
-static inline int s3c24xx_ostimer_pending(void)
-{
- return __raw_readl(S3C2410_SUBSRCPND) & SUBSRC_TIMER4;
-}
diff --git a/arch/arm/mach-s3c24a0/include/mach/timex.h b/arch/arm/mach-s3c24a0/include/mach/timex.h
deleted file mode 100644
index 98573424a016..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/timex.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* linux/arch/arm/mach-s3c24a0/include/mach/timex.h
- *
- * Copyright (c) 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - time parameters
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_TIMEX_H
-#define __ASM_ARCH_TIMEX_H
-
-#define CLOCK_TICK_RATE 12000000
-
-#endif /* __ASM_ARCH_TIMEX_H */
diff --git a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h
deleted file mode 100644
index 6480b15277f3..000000000000
--- a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* linux/include/asm-arm/arch-s3c24ao/vmalloc.h
- *
- * Copyright 2008 Simtec Electronics <linux@simtec.co.uk>
-
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C24A0 vmalloc definition
-*/
-
-#ifndef __ASM_ARCH_VMALLOC_H
-#define __ASM_ARCH_VMALLOC_H
-
-#define VMALLOC_END 0xF6000000UL
-
-#endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index e4177e22557b..fdc89fc3b464 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -142,6 +142,7 @@ config MACH_SMDK6410
select S3C_DEV_USB_HOST
select S3C_DEV_USB_HSOTG
select S3C_DEV_WDT
+ select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_KEYPAD
select SAMSUNG_DEV_PWM
select HAVE_S3C2410_WATCHDOG if WATCHDOG
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index 4657363f0674..f5a7144a052f 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -23,10 +23,6 @@ obj-$(CONFIG_CPU_S3C6410) += s3c6410.o
obj-y += irq.o
obj-y += irq-eint.o
-# CPU frequency scaling
-
-obj-$(CONFIG_CPU_FREQ_S3C64XX) += cpufreq.o
-
# DMA support
obj-$(CONFIG_S3C64XX_DMA) += dma.o
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index fdfc4d5e37a1..8cf39e33579e 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -39,7 +39,6 @@
static struct clk clk_ext_xtal_mux = {
.name = "ext_xtal",
- .id = -1,
};
#define clk_fin_apll clk_ext_xtal_mux
@@ -51,13 +50,11 @@ static struct clk clk_ext_xtal_mux = {
struct clk clk_h2 = {
.name = "hclk2",
- .id = -1,
.rate = 0,
};
struct clk clk_27m = {
.name = "clk_27m",
- .id = -1,
.rate = 27000000,
};
@@ -83,14 +80,12 @@ static int clk_48m_ctrl(struct clk *clk, int enable)
struct clk clk_48m = {
.name = "clk_48m",
- .id = -1,
.rate = 48000000,
.enable = clk_48m_ctrl,
};
struct clk clk_xusbxti = {
.name = "xusbxti",
- .id = -1,
.rate = 48000000,
};
@@ -130,109 +125,101 @@ int s3c64xx_sclk_ctrl(struct clk *clk, int enable)
static struct clk init_clocks_off[] = {
{
.name = "nand",
- .id = -1,
.parent = &clk_h,
}, {
.name = "rtc",
- .id = -1,
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_RTC,
}, {
.name = "adc",
- .id = -1,
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_TSADC,
}, {
.name = "i2c",
- .id = -1,
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_IIC,
}, {
.name = "i2c",
- .id = 1,
+ .devname = "s3c2440-i2c.1",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C6410_CLKCON_PCLK_I2C1,
}, {
.name = "iis",
- .id = 0,
+ .devname = "samsung-i2s.0",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_IIS0,
}, {
.name = "iis",
- .id = 1,
+ .devname = "samsung-i2s.1",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_IIS1,
}, {
#ifdef CONFIG_CPU_S3C6410
.name = "iis",
- .id = -1, /* There's only one IISv4 port */
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C6410_CLKCON_PCLK_IIS2,
}, {
#endif
.name = "keypad",
- .id = -1,
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_KEYPAD,
}, {
.name = "spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_SPI0,
}, {
.name = "spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_SPI1,
}, {
.name = "spi_48m",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.parent = &clk_48m,
.enable = s3c64xx_sclk_ctrl,
.ctrlbit = S3C_CLKCON_SCLK_SPI0_48,
}, {
.name = "spi_48m",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.parent = &clk_48m,
.enable = s3c64xx_sclk_ctrl,
.ctrlbit = S3C_CLKCON_SCLK_SPI1_48,
}, {
.name = "48m",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_48m,
.enable = s3c64xx_sclk_ctrl,
.ctrlbit = S3C_CLKCON_SCLK_MMC0_48,
}, {
.name = "48m",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_48m,
.enable = s3c64xx_sclk_ctrl,
.ctrlbit = S3C_CLKCON_SCLK_MMC1_48,
}, {
.name = "48m",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_48m,
.enable = s3c64xx_sclk_ctrl,
.ctrlbit = S3C_CLKCON_SCLK_MMC2_48,
}, {
.name = "dma0",
- .id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_DMA0,
}, {
.name = "dma1",
- .id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_DMA1,
@@ -242,89 +229,81 @@ static struct clk init_clocks_off[] = {
static struct clk init_clocks[] = {
{
.name = "lcd",
- .id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_LCD,
}, {
.name = "gpio",
- .id = -1,
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_GPIO,
}, {
.name = "usb-host",
- .id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_UHOST,
}, {
.name = "hsmmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_HSMMC0,
}, {
.name = "hsmmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_HSMMC1,
}, {
.name = "hsmmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_HSMMC2,
}, {
.name = "otg",
- .id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_USB,
}, {
.name = "timers",
- .id = -1,
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_PWM,
}, {
.name = "uart",
- .id = 0,
+ .devname = "s3c6400-uart.0",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_UART0,
}, {
.name = "uart",
- .id = 1,
+ .devname = "s3c6400-uart.1",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_UART1,
}, {
.name = "uart",
- .id = 2,
+ .devname = "s3c6400-uart.2",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_UART2,
}, {
.name = "uart",
- .id = 3,
+ .devname = "s3c6400-uart.3",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_UART3,
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_p,
.ctrlbit = S3C_CLKCON_PCLK_WDT,
}, {
.name = "ac97",
- .id = -1,
.parent = &clk_p,
.ctrlbit = S3C_CLKCON_PCLK_AC97,
}, {
.name = "cfcon",
- .id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
.ctrlbit = S3C_CLKCON_HCLK_IHOST,
@@ -334,7 +313,6 @@ static struct clk init_clocks[] = {
static struct clk clk_fout_apll = {
.name = "fout_apll",
- .id = -1,
};
static struct clk *clk_src_apll_list[] = {
@@ -350,7 +328,6 @@ static struct clksrc_sources clk_src_apll = {
static struct clksrc_clk clk_mout_apll = {
.clk = {
.name = "mout_apll",
- .id = -1,
},
.reg_src = { .reg = S3C_CLK_SRC, .shift = 0, .size = 1 },
.sources = &clk_src_apll,
@@ -369,7 +346,6 @@ static struct clksrc_sources clk_src_epll = {
static struct clksrc_clk clk_mout_epll = {
.clk = {
.name = "mout_epll",
- .id = -1,
},
.reg_src = { .reg = S3C_CLK_SRC, .shift = 2, .size = 1 },
.sources = &clk_src_epll,
@@ -388,7 +364,6 @@ static struct clksrc_sources clk_src_mpll = {
static struct clksrc_clk clk_mout_mpll = {
.clk = {
.name = "mout_mpll",
- .id = -1,
},
.reg_src = { .reg = S3C_CLK_SRC, .shift = 1, .size = 1 },
.sources = &clk_src_mpll,
@@ -446,7 +421,6 @@ static int s3c64xx_clk_arm_set_rate(struct clk *clk, unsigned long rate)
static struct clk clk_arm = {
.name = "armclk",
- .id = -1,
.parent = &clk_mout_apll.clk,
.ops = &(struct clk_ops) {
.get_rate = s3c64xx_clk_arm_get_rate,
@@ -473,7 +447,6 @@ static struct clk_ops clk_dout_ops = {
static struct clk clk_dout_mpll = {
.name = "dout_mpll",
- .id = -1,
.parent = &clk_mout_mpll.clk,
.ops = &clk_dout_ops,
};
@@ -540,22 +513,18 @@ static struct clksrc_sources clkset_uhost = {
static struct clk clk_iis_cd0 = {
.name = "iis_cdclk0",
- .id = -1,
};
static struct clk clk_iis_cd1 = {
.name = "iis_cdclk1",
- .id = -1,
};
static struct clk clk_iisv4_cd = {
.name = "iis_cdclk_v4",
- .id = -1,
};
static struct clk clk_pcm_cd = {
.name = "pcm_cdclk",
- .id = -1,
};
static struct clk *clkset_audio0_list[] = {
@@ -610,7 +579,7 @@ static struct clksrc_clk clksrcs[] = {
{
.clk = {
.name = "mmc_bus",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.ctrlbit = S3C_CLKCON_SCLK_MMC0,
.enable = s3c64xx_sclk_ctrl,
},
@@ -620,7 +589,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "mmc_bus",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.ctrlbit = S3C_CLKCON_SCLK_MMC1,
.enable = s3c64xx_sclk_ctrl,
},
@@ -630,7 +599,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "mmc_bus",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.ctrlbit = S3C_CLKCON_SCLK_MMC2,
.enable = s3c64xx_sclk_ctrl,
},
@@ -640,7 +609,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "usb-bus-host",
- .id = -1,
.ctrlbit = S3C_CLKCON_SCLK_UHOST,
.enable = s3c64xx_sclk_ctrl,
},
@@ -650,7 +618,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = -1,
.ctrlbit = S3C_CLKCON_SCLK_UART,
.enable = s3c64xx_sclk_ctrl,
},
@@ -661,7 +628,7 @@ static struct clksrc_clk clksrcs[] = {
/* Where does UCLK0 come from? */
.clk = {
.name = "spi-bus",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.ctrlbit = S3C_CLKCON_SCLK_SPI0,
.enable = s3c64xx_sclk_ctrl,
},
@@ -671,8 +638,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "spi-bus",
- .id = 1,
- .ctrlbit = S3C_CLKCON_SCLK_SPI1,
+ .devname = "s3c64xx-spi.1",
.enable = s3c64xx_sclk_ctrl,
},
.reg_src = { .reg = S3C_CLK_SRC, .shift = 16, .size = 2 },
@@ -681,7 +647,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "audio-bus",
- .id = 0,
+ .devname = "samsung-i2s.0",
.ctrlbit = S3C_CLKCON_SCLK_AUDIO0,
.enable = s3c64xx_sclk_ctrl,
},
@@ -691,7 +657,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "audio-bus",
- .id = 1,
+ .devname = "samsung-i2s.1",
.ctrlbit = S3C_CLKCON_SCLK_AUDIO1,
.enable = s3c64xx_sclk_ctrl,
},
@@ -701,7 +667,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "audio-bus",
- .id = 2,
+ .devname = "samsung-i2s.2",
.ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2,
.enable = s3c64xx_sclk_ctrl,
},
@@ -711,7 +677,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "irda-bus",
- .id = 0,
.ctrlbit = S3C_CLKCON_SCLK_IRDA,
.enable = s3c64xx_sclk_ctrl,
},
@@ -721,7 +686,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "camera",
- .id = -1,
.ctrlbit = S3C_CLKCON_SCLK_CAM,
.enable = s3c64xx_sclk_ctrl,
},
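The clock.c hunks above replace numeric .id fields with clkdev-style .devname strings, so a clock is matched to its consumer by device name plus connection name instead of by an index. A minimal sketch of how a consumer would now look such a clock up; the probe function and error handling are illustrative only, not part of this patch, and assume a device named "s3c-sdhci.0" as in the table above:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *bus_clk;

		/*
		 * dev_name(&pdev->dev) is "s3c-sdhci.0", so the lookup resolves
		 * against the clock table entry whose .devname matches, using
		 * "hsmmc" as the connection id; no per-SoC numeric id is needed.
		 */
		bus_clk = clk_get(&pdev->dev, "hsmmc");
		if (IS_ERR(bus_clk))
			return PTR_ERR(bus_clk);

		clk_enable(bus_clk);
		/* ... use the hardware ... */
		clk_disable(bus_clk);
		clk_put(bus_clk);

		return 0;
	}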
diff --git a/arch/arm/mach-s3c64xx/cpufreq.c b/arch/arm/mach-s3c64xx/cpufreq.c
deleted file mode 100644
index 4375b97588b8..000000000000
--- a/arch/arm/mach-s3c64xx/cpufreq.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/* linux/arch/arm/plat-s3c64xx/cpufreq.c
- *
- * Copyright 2009 Wolfson Microelectronics plc
- *
- * S3C64xx CPUfreq Support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/regulator/consumer.h>
-
-static struct clk *armclk;
-static struct regulator *vddarm;
-static unsigned long regulator_latency;
-
-#ifdef CONFIG_CPU_S3C6410
-struct s3c64xx_dvfs {
- unsigned int vddarm_min;
- unsigned int vddarm_max;
-};
-
-static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
- [0] = { 1000000, 1150000 },
- [1] = { 1050000, 1150000 },
- [2] = { 1100000, 1150000 },
- [3] = { 1200000, 1350000 },
-};
-
-static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
- { 0, 66000 },
- { 0, 133000 },
- { 1, 222000 },
- { 1, 266000 },
- { 2, 333000 },
- { 2, 400000 },
- { 2, 532000 },
- { 2, 533000 },
- { 3, 667000 },
- { 0, CPUFREQ_TABLE_END },
-};
-#endif
-
-static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu != 0)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
-}
-
-static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
-{
- if (cpu != 0)
- return 0;
-
- return clk_get_rate(armclk) / 1000;
-}
-
-static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- int ret;
- unsigned int i;
- struct cpufreq_freqs freqs;
- struct s3c64xx_dvfs *dvfs;
-
- ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
- target_freq, relation, &i);
- if (ret != 0)
- return ret;
-
- freqs.cpu = 0;
- freqs.old = clk_get_rate(armclk) / 1000;
- freqs.new = s3c64xx_freq_table[i].frequency;
- freqs.flags = 0;
- dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
-
- if (freqs.old == freqs.new)
- return 0;
-
- pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
-#ifdef CONFIG_REGULATOR
- if (vddarm && freqs.new > freqs.old) {
- ret = regulator_set_voltage(vddarm,
- dvfs->vddarm_min,
- dvfs->vddarm_max);
- if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
- freqs.new, ret);
- goto err;
- }
- }
-#endif
-
- ret = clk_set_rate(armclk, freqs.new * 1000);
- if (ret < 0) {
- pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
- freqs.new, ret);
- goto err;
- }
-
-#ifdef CONFIG_REGULATOR
- if (vddarm && freqs.new < freqs.old) {
- ret = regulator_set_voltage(vddarm,
- dvfs->vddarm_min,
- dvfs->vddarm_max);
- if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
- freqs.new, ret);
- goto err_clk;
- }
- }
-#endif
-
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: Set actual frequency %lukHz\n",
- clk_get_rate(armclk) / 1000);
-
- return 0;
-
-err_clk:
- if (clk_set_rate(armclk, freqs.old * 1000) < 0)
- pr_err("Failed to restore original clock rate\n");
-err:
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- return ret;
-}
-
-#ifdef CONFIG_REGULATOR
-static void __init s3c64xx_cpufreq_config_regulator(void)
-{
- int count, v, i, found;
- struct cpufreq_frequency_table *freq;
- struct s3c64xx_dvfs *dvfs;
-
- count = regulator_count_voltages(vddarm);
- if (count < 0) {
- pr_err("cpufreq: Unable to check supported voltages\n");
- }
-
- freq = s3c64xx_freq_table;
- while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
- if (freq->frequency == CPUFREQ_ENTRY_INVALID)
- continue;
-
- dvfs = &s3c64xx_dvfs_table[freq->index];
- found = 0;
-
- for (i = 0; i < count; i++) {
- v = regulator_list_voltage(vddarm, i);
- if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
- found = 1;
- }
-
- if (!found) {
- pr_debug("cpufreq: %dkHz unsupported by regulator\n",
- freq->frequency);
- freq->frequency = CPUFREQ_ENTRY_INVALID;
- }
-
- freq++;
- }
-
- /* Guess based on having to do an I2C/SPI write; in future we
- * will be able to query the regulator performance here. */
- regulator_latency = 1 * 1000 * 1000;
-}
-#endif
-
-static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
-{
- int ret;
- struct cpufreq_frequency_table *freq;
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- if (s3c64xx_freq_table == NULL) {
- pr_err("cpufreq: No frequency information for this CPU\n");
- return -ENODEV;
- }
-
- armclk = clk_get(NULL, "armclk");
- if (IS_ERR(armclk)) {
- pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
- PTR_ERR(armclk));
- return PTR_ERR(armclk);
- }
-
-#ifdef CONFIG_REGULATOR
- vddarm = regulator_get(NULL, "vddarm");
- if (IS_ERR(vddarm)) {
- ret = PTR_ERR(vddarm);
- pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
- pr_err("cpufreq: Only frequency scaling available\n");
- vddarm = NULL;
- } else {
- s3c64xx_cpufreq_config_regulator();
- }
-#endif
-
- freq = s3c64xx_freq_table;
- while (freq->frequency != CPUFREQ_TABLE_END) {
- unsigned long r;
-
- /* Check for frequencies we can generate */
- r = clk_round_rate(armclk, freq->frequency * 1000);
- r /= 1000;
- if (r != freq->frequency) {
- pr_debug("cpufreq: %dkHz unsupported by clock\n",
- freq->frequency);
- freq->frequency = CPUFREQ_ENTRY_INVALID;
- }
-
- /* If we have no regulator then assume startup
- * frequency is the maximum we can support. */
- if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
- freq->frequency = CPUFREQ_ENTRY_INVALID;
-
- freq++;
- }
-
- policy->cur = clk_get_rate(armclk) / 1000;
-
-	/* Datasheet says PLL stabilisation time (if we were to use
- * the PLLs, which we don't currently) is ~300us worst case,
- * but add some fudge.
- */
- policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
-
- ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
- if (ret != 0) {
- pr_err("cpufreq: Failed to configure frequency table: %d\n",
- ret);
- regulator_put(vddarm);
- clk_put(armclk);
- }
-
- return ret;
-}
-
-static struct cpufreq_driver s3c64xx_cpufreq_driver = {
- .owner = THIS_MODULE,
- .flags = 0,
- .verify = s3c64xx_cpufreq_verify_speed,
- .target = s3c64xx_cpufreq_set_target,
- .get = s3c64xx_cpufreq_get_speed,
- .init = s3c64xx_cpufreq_driver_init,
- .name = "s3c",
-};
-
-static int __init s3c64xx_cpufreq_init(void)
-{
- return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
-}
-module_init(s3c64xx_cpufreq_init);
diff --git a/arch/arm/mach-s3c64xx/dev-onenand1.c b/arch/arm/mach-s3c64xx/dev-onenand1.c
index 92ffd5bac104..999f9e17a1e4 100644
--- a/arch/arm/mach-s3c64xx/dev-onenand1.c
+++ b/arch/arm/mach-s3c64xx/dev-onenand1.c
@@ -19,6 +19,8 @@
#include <mach/irqs.h>
#include <mach/map.h>
+#include <plat/devs.h>
+
static struct resource s3c64xx_onenand1_resources[] = {
[0] = {
.start = S3C64XX_PA_ONENAND1,
@@ -46,10 +48,6 @@ struct platform_device s3c64xx_device_onenand1 = {
void s3c64xx_onenand1_set_platdata(struct onenand_platform_data *pdata)
{
- struct onenand_platform_data *pd;
-
- pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
- if (!pd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- s3c64xx_device_onenand1.dev.platform_data = pd;
+ s3c_set_platdata(pdata, sizeof(struct onenand_platform_data),
+ &s3c64xx_device_onenand1);
}
diff --git a/arch/arm/mach-s3c64xx/include/mach/clkdev.h b/arch/arm/mach-s3c64xx/include/mach/clkdev.h
new file mode 100644
index 000000000000..7dffa83d23ff
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_CLKDEV_H__
+#define __MACH_CLKDEV_H__
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do {} while (0)
+
+#endif
diff --git a/arch/arm/mach-s3c64xx/include/mach/regs-fb.h b/arch/arm/mach-s3c64xx/include/mach/regs-fb.h
deleted file mode 100644
index a06ee0af9a4b..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/regs-fb.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Copyright 2009 Samsung Electronics Co.
- *
- * Pawel Osciak <p.osciak@samsung.com>
- * Based on plat-s3c/include/plat/regs-fb.h by Ben Dooks <ben@simtec.co.uk>
- *
- * Framebuffer register definitions for Samsung S3C64xx.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_MACH_REGS_FB_H
-#define __ASM_ARCH_MACH_REGS_FB_H __FILE__
-
-#include <plat/regs-fb-v4.h>
-
-#endif /* __ASM_ARCH_MACH_REGS_FB_H */
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index a53cf149476e..cb8864327ac4 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -35,7 +35,6 @@
#include <asm/mach/irq.h>
#include <mach/hardware.h>
-#include <mach/regs-fb.h>
#include <mach/map.h>
#include <asm/irq.h>
@@ -44,6 +43,7 @@
#include <plat/regs-serial.h>
#include <plat/iic.h>
#include <plat/fb.h>
+#include <plat/regs-fb-v4.h>
#include <mach/s3c6410.h>
#include <plat/clock.h>
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index b2639582caca..b3d93cc8dde0 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -27,7 +27,6 @@
#include <asm/mach/irq.h>
#include <mach/hardware.h>
-#include <mach/regs-fb.h>
#include <mach/map.h>
#include <asm/irq.h>
@@ -42,6 +41,7 @@
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
+#include <plat/regs-fb-v4.h>
#define UCON S3C2410_UCON_DEFAULT
#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE)
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index 89f35e02e883..527f49bd1b57 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -29,7 +29,6 @@
#include <asm/mach/map.h>
#include <mach/map.h>
-#include <mach/regs-fb.h>
#include <mach/regs-gpio.h>
#include <mach/regs-modem.h>
#include <mach/regs-srom.h>
@@ -42,6 +41,7 @@
#include <plat/nand.h>
#include <plat/regs-serial.h>
#include <plat/ts.h>
+#include <plat/regs-fb-v4.h>
#include <video/platform_lcd.h>
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index c4986498cd12..01c6857c5b63 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -30,7 +30,6 @@
#include <asm/mach/irq.h>
#include <mach/hardware.h>
-#include <mach/regs-fb.h>
#include <mach/map.h>
#include <asm/irq.h>
@@ -44,6 +43,7 @@
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
+#include <plat/regs-fb-v4.h>
#define UCON S3C2410_UCON_DEFAULT
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 4957ab0a0d4a..95b04b1729e3 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -30,7 +30,6 @@
#include <asm/mach/map.h>
#include <mach/map.h>
-#include <mach/regs-fb.h>
#include <mach/regs-gpio.h>
#include <mach/regs-modem.h>
#include <mach/regs-srom.h>
@@ -43,6 +42,7 @@
#include <plat/nand.h>
#include <plat/regs-serial.h>
#include <plat/ts.h>
+#include <plat/regs-fb-v4.h>
#include <video/platform_lcd.h>
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index 3a3e5acde523..342e8dfddf8b 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -21,7 +21,6 @@
#include <asm/mach/arch.h>
#include <mach/map.h>
-#include <mach/regs-fb.h>
#include <mach/regs-gpio.h>
#include <mach/s3c6410.h>
@@ -29,6 +28,7 @@
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
+#include <plat/regs-fb-v4.h>
#include "mach-smartq.h"
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index e65375877d53..57963977da8e 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -21,7 +21,6 @@
#include <asm/mach/arch.h>
#include <mach/map.h>
-#include <mach/regs-fb.h>
#include <mach/regs-gpio.h>
#include <mach/s3c6410.h>
@@ -29,6 +28,7 @@
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
+#include <plat/regs-fb-v4.h>
#include "mach-smartq.h"
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 2c0353a80906..ecbea92bf83b 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -48,7 +48,6 @@
#include <asm/mach/irq.h>
#include <mach/hardware.h>
-#include <mach/regs-fb.h>
#include <mach/map.h>
#include <asm/irq.h>
@@ -71,6 +70,8 @@
#include <plat/adc.h>
#include <plat/ts.h>
#include <plat/keypad.h>
+#include <plat/backlight.h>
+#include <plat/regs-fb-v4.h>
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
@@ -209,17 +210,9 @@ static struct platform_device smdk6410_smsc911x = {
};
#ifdef CONFIG_REGULATOR
-static struct regulator_consumer_supply smdk6410_b_pwr_5v_consumers[] = {
- {
- /* WM8580 */
- .supply = "PVDD",
- .dev_name = "0-001b",
- },
- {
- /* WM8580 */
- .supply = "AVDD",
- .dev_name = "0-001b",
- },
+static struct regulator_consumer_supply smdk6410_b_pwr_5v_consumers[] __initdata = {
+ REGULATOR_SUPPLY("PVDD", "0-001b"),
+ REGULATOR_SUPPLY("AVDD", "0-001b"),
};
static struct regulator_init_data smdk6410_b_pwr_5v_data = {
@@ -337,16 +330,12 @@ static struct platform_device *smdk6410_devices[] __initdata = {
&s3c_device_rtc,
&s3c_device_ts,
&s3c_device_wdt,
- &s3c_device_timer[1],
- &smdk6410_backlight_device,
};
#ifdef CONFIG_REGULATOR
/* ARM core */
static struct regulator_consumer_supply smdk6410_vddarm_consumers[] = {
- {
- .supply = "vddarm",
- }
+ REGULATOR_SUPPLY("vddarm", NULL),
};
/* VDDARM, BUCK1 on J5 */
@@ -484,11 +473,7 @@ static struct regulator_init_data wm8350_dcdc3_data = {
/* USB, EXT, PCM, ADC/DAC, USB, MMC */
static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = {
- {
- /* WM8580 */
- .supply = "DVDD",
- .dev_name = "0-001b",
- },
+ REGULATOR_SUPPLY("DVDD", "0-001b"),
};
static struct regulator_init_data wm8350_dcdc4_data = {
@@ -599,7 +584,7 @@ static struct regulator_init_data wm1192_dcdc3 = {
};
static struct regulator_consumer_supply wm1192_ldo1_consumers[] = {
- { .supply = "DVDD", .dev_name = "0-001b", }, /* WM8580 */
+ REGULATOR_SUPPLY("DVDD", "0-001b"), /* WM8580 */
};
static struct regulator_init_data wm1192_ldo1 = {
@@ -679,6 +664,16 @@ static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
.oversampling_shift = 2,
};
+/* LCD Backlight data */
+static struct samsung_bl_gpio_info smdk6410_bl_gpio_info = {
+ .no = S3C64XX_GPF(15),
+ .func = S3C_GPIO_SFN(2),
+};
+
+static struct platform_pwm_backlight_data smdk6410_bl_data = {
+ .pwm_id = 1,
+};
+
static void __init smdk6410_map_io(void)
{
u32 tmp;
@@ -740,6 +735,8 @@ static void __init smdk6410_machine_init(void)
s3c_ide_set_platdata(&smdk6410_ide_pdata);
+ samsung_bl_set(&smdk6410_bl_gpio_info, &smdk6410_bl_data);
+
platform_add_devices(smdk6410_devices, ARRAY_SIZE(smdk6410_devices));
}
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index bc1c470b7de6..8bad64370689 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -112,7 +112,7 @@ void s3c_pm_save_core(void)
* this.
*/
-static void s3c64xx_cpu_suspend(void)
+static int s3c64xx_cpu_suspend(unsigned long arg)
{
unsigned long tmp;
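The pm.c change above converts the S3C64xx suspend finisher to the generic ARM convention: it now takes an unsigned long argument and returns an int, with a non-zero return meaning the low-power entry was aborted. A rough sketch of how such a finisher is driven, assuming the cpu_suspend() helper declared in asm/suspend.h; the function names below are hypothetical and not taken from this patch:

	#include <asm/suspend.h>	/* assumed home of cpu_suspend() */

	static int example_cpu_suspend(unsigned long arg)
	{
		/*
		 * Program the SoC sleep/power registers here. Returning
		 * non-zero reports that entering the low-power state failed
		 * and lets the caller unwind cleanly.
		 */
		return 0;
	}

	static void example_pm_enter(void)
	{
		/* cpu_suspend() saves and restores CPU state around the finisher */
		cpu_suspend(0, example_cpu_suspend);
	}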
diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
index 8f3091182f9c..83d2afb79e9f 100644
--- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
+++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
@@ -17,7 +17,6 @@
#include <linux/fb.h>
#include <linux/gpio.h>
-#include <mach/regs-fb.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S
index 1f87732b2320..34313f9c8792 100644
--- a/arch/arm/mach-s3c64xx/sleep.S
+++ b/arch/arm/mach-s3c64xx/sleep.S
@@ -25,29 +25,6 @@
.text
- /* s3c_cpu_save
- *
- * Save enough processor state to allow the restart of the pm.c
- * code after resume.
- *
- * entry:
- * r1 = v:p offset
- */
-
-ENTRY(s3c_cpu_save)
- stmfd sp!, { r4 - r12, lr }
- ldr r3, =resume_with_mmu
- bl cpu_suspend
-
- @@ call final suspend code
- ldr r0, =pm_cpu_sleep
- ldr pc, [r0]
-
- @@ return to the caller, after the MMU is turned on.
- @@ restore the last bits of the stack and return.
-resume_with_mmu:
- ldmfd sp!, { r4 - r12, pc } @ return, from sp from s3c_cpu_save
-
/* Sleep magic, the word before the resume entry point so that the
 * bootloader can check for a resumable image. */
diff --git a/arch/arm/mach-s5p64x0/Kconfig b/arch/arm/mach-s5p64x0/Kconfig
index 017af4c4293c..65c7518dad7f 100644
--- a/arch/arm/mach-s5p64x0/Kconfig
+++ b/arch/arm/mach-s5p64x0/Kconfig
@@ -36,6 +36,7 @@ config MACH_SMDK6440
select S3C_DEV_WDT
select S3C64XX_DEV_SPI
select SAMSUNG_DEV_ADC
+ select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
select SAMSUNG_DEV_TS
select S5P64X0_SETUP_I2C1
@@ -50,6 +51,7 @@ config MACH_SMDK6450
select S3C_DEV_WDT
select S3C64XX_DEV_SPI
select SAMSUNG_DEV_ADC
+ select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
select SAMSUNG_DEV_TS
select S5P64X0_SETUP_I2C1
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index 9f12c2ebf416..0e9cd3092dd2 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -95,7 +95,6 @@ static struct clk_ops s5p6440_epll_ops = {
static struct clksrc_clk clk_hclk = {
.clk = {
.name = "clk_hclk",
- .id = -1,
.parent = &clk_armclk.clk,
},
.reg_div = { .reg = S5P64X0_CLK_DIV0, .shift = 8, .size = 4 },
@@ -104,7 +103,6 @@ static struct clksrc_clk clk_hclk = {
static struct clksrc_clk clk_pclk = {
.clk = {
.name = "clk_pclk",
- .id = -1,
.parent = &clk_hclk.clk,
},
.reg_div = { .reg = S5P64X0_CLK_DIV0, .shift = 12, .size = 4 },
@@ -112,7 +110,6 @@ static struct clksrc_clk clk_pclk = {
static struct clksrc_clk clk_hclk_low = {
.clk = {
.name = "clk_hclk_low",
- .id = -1,
},
.sources = &clkset_hclk_low,
.reg_src = { .reg = S5P64X0_SYS_OTHERS, .shift = 6, .size = 1 },
@@ -122,7 +119,6 @@ static struct clksrc_clk clk_hclk_low = {
static struct clksrc_clk clk_pclk_low = {
.clk = {
.name = "clk_pclk_low",
- .id = -1,
.parent = &clk_hclk_low.clk,
},
.reg_div = { .reg = S5P64X0_CLK_DIV3, .shift = 12, .size = 4 },
@@ -136,187 +132,167 @@ static struct clksrc_clk clk_pclk_low = {
static struct clk init_clocks_off[] = {
{
.name = "nand",
- .id = -1,
.parent = &clk_hclk.clk,
.enable = s5p64x0_mem_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "post",
- .id = -1,
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 5)
}, {
.name = "2d",
- .id = -1,
.parent = &clk_hclk.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "pdma",
- .id = -1,
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "hsmmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "hsmmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "hsmmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 19),
}, {
.name = "otg",
- .id = -1,
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 20)
}, {
.name = "irom",
- .id = -1,
.parent = &clk_hclk.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 25),
}, {
.name = "lcd",
- .id = -1,
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk1_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "hclk_fimgvg",
- .id = -1,
.parent = &clk_hclk.clk,
.enable = s5p64x0_hclk1_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "tsi",
- .id = -1,
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk1_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "rtc",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "timers",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "pcm",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "adc",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "i2c",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 21),
}, {
.name = "spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 22),
}, {
.name = "gps",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 25),
}, {
.name = "iis",
- .id = 0,
+ .devname = "samsung-i2s.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 26),
}, {
.name = "dsim",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 28),
}, {
.name = "etm",
- .id = -1,
.parent = &clk_pclk.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 29),
}, {
.name = "dmc0",
- .id = -1,
.parent = &clk_pclk.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 30),
}, {
.name = "pclk_fimgvg",
- .id = -1,
.parent = &clk_pclk.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 31),
}, {
.name = "sclk_spi_48",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.parent = &clk_48m,
.enable = s5p64x0_sclk_ctrl,
.ctrlbit = (1 << 22),
}, {
.name = "sclk_spi_48",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.parent = &clk_48m,
.enable = s5p64x0_sclk_ctrl,
.ctrlbit = (1 << 23),
}, {
.name = "mmc_48m",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_48m,
.enable = s5p64x0_sclk_ctrl,
.ctrlbit = (1 << 27),
}, {
.name = "mmc_48m",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_48m,
.enable = s5p64x0_sclk_ctrl,
.ctrlbit = (1 << 28),
}, {
.name = "mmc_48m",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_48m,
.enable = s5p64x0_sclk_ctrl,
.ctrlbit = (1 << 29),
@@ -329,43 +305,40 @@ static struct clk init_clocks_off[] = {
static struct clk init_clocks[] = {
{
.name = "intc",
- .id = -1,
.parent = &clk_hclk.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "mem",
- .id = -1,
.parent = &clk_hclk.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 21),
}, {
.name = "uart",
- .id = 0,
+ .devname = "s3c6400-uart.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "uart",
- .id = 1,
+ .devname = "s3c6400-uart.1",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "uart",
- .id = 2,
+ .devname = "s3c6400-uart.2",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "uart",
- .id = 3,
+ .devname = "s3c6400-uart.3",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "gpio",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 18),
@@ -374,12 +347,10 @@ static struct clk init_clocks[] = {
static struct clk clk_iis_cd_v40 = {
.name = "iis_cdclk_v40",
- .id = -1,
};
static struct clk clk_pcm_cd = {
.name = "pcm_cdclk",
- .id = -1,
};
static struct clk *clkset_group1_list[] = {
@@ -420,7 +391,7 @@ static struct clksrc_clk clksrcs[] = {
{
.clk = {
.name = "sclk_mmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.ctrlbit = (1 << 24),
.enable = s5p64x0_sclk_ctrl,
},
@@ -430,7 +401,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.ctrlbit = (1 << 25),
.enable = s5p64x0_sclk_ctrl,
},
@@ -440,7 +411,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.ctrlbit = (1 << 26),
.enable = s5p64x0_sclk_ctrl,
},
@@ -450,7 +421,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = -1,
.ctrlbit = (1 << 5),
.enable = s5p64x0_sclk_ctrl,
},
@@ -460,7 +430,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.ctrlbit = (1 << 20),
.enable = s5p64x0_sclk_ctrl,
},
@@ -470,7 +440,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.ctrlbit = (1 << 21),
.enable = s5p64x0_sclk_ctrl,
},
@@ -480,7 +450,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_post",
- .id = -1,
.ctrlbit = (1 << 10),
.enable = s5p64x0_sclk_ctrl,
},
@@ -490,7 +459,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_dispcon",
- .id = -1,
.ctrlbit = (1 << 1),
.enable = s5p64x0_sclk1_ctrl,
},
@@ -500,7 +468,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimgvg",
- .id = -1,
.ctrlbit = (1 << 2),
.enable = s5p64x0_sclk1_ctrl,
},
@@ -510,7 +477,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_audio2",
- .id = -1,
.ctrlbit = (1 << 11),
.enable = s5p64x0_sclk_ctrl,
},
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index 4eec457ddccc..d9dc16cde109 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -36,7 +36,6 @@
static struct clksrc_clk clk_mout_dpll = {
.clk = {
.name = "mout_dpll",
- .id = -1,
},
.sources = &clk_src_dpll,
.reg_src = { .reg = S5P64X0_CLK_SRC0, .shift = 5, .size = 1 },
@@ -96,7 +95,6 @@ static struct clk_ops s5p6450_epll_ops = {
static struct clksrc_clk clk_dout_epll = {
.clk = {
.name = "dout_epll",
- .id = -1,
.parent = &clk_mout_epll.clk,
},
.reg_div = { .reg = S5P64X0_CLK_DIV1, .shift = 24, .size = 4 },
@@ -105,7 +103,6 @@ static struct clksrc_clk clk_dout_epll = {
static struct clksrc_clk clk_mout_hclk_sel = {
.clk = {
.name = "mout_hclk_sel",
- .id = -1,
},
.sources = &clkset_hclk_low,
.reg_src = { .reg = S5P64X0_OTHERS, .shift = 15, .size = 1 },
@@ -124,7 +121,6 @@ static struct clksrc_sources clkset_hclk = {
static struct clksrc_clk clk_hclk = {
.clk = {
.name = "clk_hclk",
- .id = -1,
},
.sources = &clkset_hclk,
.reg_src = { .reg = S5P64X0_OTHERS, .shift = 14, .size = 1 },
@@ -134,7 +130,6 @@ static struct clksrc_clk clk_hclk = {
static struct clksrc_clk clk_pclk = {
.clk = {
.name = "clk_pclk",
- .id = -1,
.parent = &clk_hclk.clk,
},
.reg_div = { .reg = S5P64X0_CLK_DIV0, .shift = 12, .size = 4 },
@@ -142,7 +137,6 @@ static struct clksrc_clk clk_pclk = {
static struct clksrc_clk clk_dout_pwm_ratio0 = {
.clk = {
.name = "clk_dout_pwm_ratio0",
- .id = -1,
.parent = &clk_mout_hclk_sel.clk,
},
.reg_div = { .reg = S5P64X0_CLK_DIV3, .shift = 16, .size = 4 },
@@ -151,7 +145,6 @@ static struct clksrc_clk clk_dout_pwm_ratio0 = {
static struct clksrc_clk clk_pclk_to_wdt_pwm = {
.clk = {
.name = "clk_pclk_to_wdt_pwm",
- .id = -1,
.parent = &clk_dout_pwm_ratio0.clk,
},
.reg_div = { .reg = S5P64X0_CLK_DIV3, .shift = 20, .size = 4 },
@@ -160,7 +153,6 @@ static struct clksrc_clk clk_pclk_to_wdt_pwm = {
static struct clksrc_clk clk_hclk_low = {
.clk = {
.name = "clk_hclk_low",
- .id = -1,
},
.sources = &clkset_hclk_low,
.reg_src = { .reg = S5P64X0_OTHERS, .shift = 6, .size = 1 },
@@ -170,7 +162,6 @@ static struct clksrc_clk clk_hclk_low = {
static struct clksrc_clk clk_pclk_low = {
.clk = {
.name = "clk_pclk_low",
- .id = -1,
.parent = &clk_hclk_low.clk,
},
.reg_div = { .reg = S5P64X0_CLK_DIV3, .shift = 12, .size = 4 },
@@ -184,109 +175,101 @@ static struct clksrc_clk clk_pclk_low = {
static struct clk init_clocks_off[] = {
{
.name = "usbhost",
- .id = -1,
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "pdma",
- .id = -1,
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "hsmmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "hsmmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "hsmmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 19),
}, {
.name = "usbotg",
- .id = -1,
.parent = &clk_hclk_low.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 20),
}, {
.name = "lcd",
- .id = -1,
.parent = &clk_h,
.enable = s5p64x0_hclk1_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "rtc",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "adc",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "i2c",
- .id = 0,
+ .devname = "s3c2440-i2c.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 21),
}, {
.name = "spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 22),
}, {
.name = "iis",
- .id = 0,
+ .devname = "samsung-i2s.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 26),
}, {
.name = "iis",
- .id = 1,
+ .devname = "samsung-i2s.1",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 15),
}, {
.name = "iis",
- .id = 2,
+ .devname = "samsung-i2s.2",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "i2c",
- .id = 1,
+ .devname = "s3c2440-i2c.1",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 27),
}, {
.name = "dmc0",
- .id = -1,
.parent = &clk_pclk.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 30),
@@ -299,49 +282,45 @@ static struct clk init_clocks_off[] = {
static struct clk init_clocks[] = {
{
.name = "intc",
- .id = -1,
.parent = &clk_hclk.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "mem",
- .id = -1,
.parent = &clk_hclk.clk,
.enable = s5p64x0_hclk0_ctrl,
.ctrlbit = (1 << 21),
}, {
.name = "uart",
- .id = 0,
+ .devname = "s3c6400-uart.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "uart",
- .id = 1,
+ .devname = "s3c6400-uart.1",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "uart",
- .id = 2,
+ .devname = "s3c6400-uart.2",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "uart",
- .id = 3,
+ .devname = "s3c6400-uart.3",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "timers",
- .id = -1,
.parent = &clk_pclk_to_wdt_pwm.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "gpio",
- .id = -1,
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 18),
@@ -421,7 +400,6 @@ static struct clksrc_sources clkset_sclk_audio0 = {
static struct clksrc_clk clk_sclk_audio0 = {
.clk = {
.name = "audio-bus",
- .id = -1,
.enable = s5p64x0_sclk_ctrl,
.ctrlbit = (1 << 8),
.parent = &clk_dout_epll.clk,
@@ -435,7 +413,7 @@ static struct clksrc_clk clksrcs[] = {
{
.clk = {
.name = "sclk_mmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.ctrlbit = (1 << 24),
.enable = s5p64x0_sclk_ctrl,
},
@@ -445,7 +423,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.ctrlbit = (1 << 25),
.enable = s5p64x0_sclk_ctrl,
},
@@ -455,7 +433,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.ctrlbit = (1 << 26),
.enable = s5p64x0_sclk_ctrl,
},
@@ -465,7 +443,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = -1,
.ctrlbit = (1 << 5),
.enable = s5p64x0_sclk_ctrl,
},
@@ -475,7 +452,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.ctrlbit = (1 << 20),
.enable = s5p64x0_sclk_ctrl,
},
@@ -485,7 +462,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.ctrlbit = (1 << 21),
.enable = s5p64x0_sclk_ctrl,
},
@@ -495,7 +472,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = -1,
.ctrlbit = (1 << 10),
.enable = s5p64x0_sclk_ctrl,
},
@@ -505,7 +481,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "aclk_mali",
- .id = -1,
.ctrlbit = (1 << 2),
.enable = s5p64x0_sclk1_ctrl,
},
@@ -515,7 +490,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_2d",
- .id = -1,
.ctrlbit = (1 << 12),
.enable = s5p64x0_sclk_ctrl,
},
@@ -525,7 +499,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_usi",
- .id = -1,
.ctrlbit = (1 << 7),
.enable = s5p64x0_sclk_ctrl,
},
@@ -535,7 +508,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_camif",
- .id = -1,
.ctrlbit = (1 << 6),
.enable = s5p64x0_sclk_ctrl,
},
@@ -545,7 +517,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_dispcon",
- .id = -1,
.ctrlbit = (1 << 1),
.enable = s5p64x0_sclk1_ctrl,
},
@@ -555,7 +526,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_hsmmc44",
- .id = -1,
.ctrlbit = (1 << 30),
.enable = s5p64x0_sclk_ctrl,
},
diff --git a/arch/arm/mach-s5p64x0/include/mach/clkdev.h b/arch/arm/mach-s5p64x0/include/mach/clkdev.h
new file mode 100644
index 000000000000..7dffa83d23ff
--- /dev/null
+++ b/arch/arm/mach-s5p64x0/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_CLKDEV_H__
+#define __MACH_CLKDEV_H__
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do {} while (0)
+
+#endif
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index 2d559f10fd47..346f8dfa6f35 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -46,6 +46,7 @@
#include <plat/adc.h>
#include <plat/ts.h>
#include <plat/s5p-time.h>
+#include <plat/backlight.h>
#define SMDK6440_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -91,45 +92,6 @@ static struct s3c2410_uartcfg smdk6440_uartcfgs[] __initdata = {
},
};
-static int smdk6440_backlight_init(struct device *dev)
-{
- int ret;
-
- ret = gpio_request(S5P6440_GPF(15), "Backlight");
- if (ret) {
- printk(KERN_ERR "failed to request GPF for PWM-OUT1\n");
- return ret;
- }
-
- /* Configure GPIO pin with S5P6440_GPF15_PWM_TOUT1 */
- s3c_gpio_cfgpin(S5P6440_GPF(15), S3C_GPIO_SFN(2));
-
- return 0;
-}
-
-static void smdk6440_backlight_exit(struct device *dev)
-{
- s3c_gpio_cfgpin(S5P6440_GPF(15), S3C_GPIO_OUTPUT);
- gpio_free(S5P6440_GPF(15));
-}
-
-static struct platform_pwm_backlight_data smdk6440_backlight_data = {
- .pwm_id = 1,
- .max_brightness = 255,
- .dft_brightness = 255,
- .pwm_period_ns = 78770,
- .init = smdk6440_backlight_init,
- .exit = smdk6440_backlight_exit,
-};
-
-static struct platform_device smdk6440_backlight_device = {
- .name = "pwm-backlight",
- .dev = {
- .parent = &s3c_device_timer[1].dev,
- .platform_data = &smdk6440_backlight_data,
- },
-};
-
static struct platform_device *smdk6440_devices[] __initdata = {
&s3c_device_adc,
&s3c_device_rtc,
@@ -139,8 +101,6 @@ static struct platform_device *smdk6440_devices[] __initdata = {
&s3c_device_wdt,
&samsung_asoc_dma,
&s5p6440_device_iis,
- &s3c_device_timer[1],
- &smdk6440_backlight_device,
};
static struct s3c2410_platform_i2c s5p6440_i2c0_data __initdata = {
@@ -175,6 +135,16 @@ static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
.oversampling_shift = 2,
};
+/* LCD Backlight data */
+static struct samsung_bl_gpio_info smdk6440_bl_gpio_info = {
+ .no = S5P6440_GPF(15),
+ .func = S3C_GPIO_SFN(2),
+};
+
+static struct platform_pwm_backlight_data smdk6440_bl_data = {
+ .pwm_id = 1,
+};
+
static void __init smdk6440_map_io(void)
{
s5p_init_io(NULL, 0, S5P64X0_SYS_ID);
@@ -194,6 +164,8 @@ static void __init smdk6440_machine_init(void)
i2c_register_board_info(1, smdk6440_i2c_devs1,
ARRAY_SIZE(smdk6440_i2c_devs1));
+ samsung_bl_set(&smdk6440_bl_gpio_info, &smdk6440_bl_data);
+
platform_add_devices(smdk6440_devices, ARRAY_SIZE(smdk6440_devices));
}
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index d19c4690ee97..33f2adf8f3fe 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -46,6 +46,7 @@
#include <plat/adc.h>
#include <plat/ts.h>
#include <plat/s5p-time.h>
+#include <plat/backlight.h>

#define SMDK6450_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -109,45 +110,6 @@ static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = {
#endif
};

-static int smdk6450_backlight_init(struct device *dev)
-{
- int ret;
-
- ret = gpio_request(S5P6450_GPF(15), "Backlight");
- if (ret) {
- printk(KERN_ERR "failed to request GPF for PWM-OUT1\n");
- return ret;
- }
-
- /* Configure GPIO pin with S5P6450_GPF15_PWM_TOUT1 */
- s3c_gpio_cfgpin(S5P6450_GPF(15), S3C_GPIO_SFN(2));
-
- return 0;
-}
-
-static void smdk6450_backlight_exit(struct device *dev)
-{
- s3c_gpio_cfgpin(S5P6450_GPF(15), S3C_GPIO_OUTPUT);
- gpio_free(S5P6450_GPF(15));
-}
-
-static struct platform_pwm_backlight_data smdk6450_backlight_data = {
- .pwm_id = 1,
- .max_brightness = 255,
- .dft_brightness = 255,
- .pwm_period_ns = 78770,
- .init = smdk6450_backlight_init,
- .exit = smdk6450_backlight_exit,
-};
-
-static struct platform_device smdk6450_backlight_device = {
- .name = "pwm-backlight",
- .dev = {
- .parent = &s3c_device_timer[1].dev,
- .platform_data = &smdk6450_backlight_data,
- },
-};
-
static struct platform_device *smdk6450_devices[] __initdata = {
&s3c_device_adc,
&s3c_device_rtc,
@@ -157,8 +119,6 @@ static struct platform_device *smdk6450_devices[] __initdata = {
&s3c_device_wdt,
&samsung_asoc_dma,
&s5p6450_device_iis0,
- &s3c_device_timer[1],
- &smdk6450_backlight_device,
/* s5p6450_device_spi0 will be added */
};
@@ -194,6 +154,16 @@ static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
.oversampling_shift = 2,
};

+/* LCD Backlight data */
+static struct samsung_bl_gpio_info smdk6450_bl_gpio_info = {
+ .no = S5P6450_GPF(15),
+ .func = S3C_GPIO_SFN(2),
+};
+
+static struct platform_pwm_backlight_data smdk6450_bl_data = {
+ .pwm_id = 1,
+};
+
static void __init smdk6450_map_io(void)
{
s5p_init_io(NULL, 0, S5P64X0_SYS_ID);
@@ -213,6 +183,8 @@ static void __init smdk6450_machine_init(void)
i2c_register_board_info(1, smdk6450_i2c_devs1,
ARRAY_SIZE(smdk6450_i2c_devs1));
+ samsung_bl_set(&smdk6450_bl_gpio_info, &smdk6450_bl_data);
+
platform_add_devices(smdk6450_devices, ARRAY_SIZE(smdk6450_devices));
}
diff --git a/arch/arm/mach-s5pc100/Kconfig b/arch/arm/mach-s5pc100/Kconfig
index 608722ff4f28..e8a33c4b054c 100644
--- a/arch/arm/mach-s5pc100/Kconfig
+++ b/arch/arm/mach-s5pc100/Kconfig
@@ -56,6 +56,7 @@ config MACH_SMDKC100
select S3C_DEV_RTC
select S3C_DEV_WDT
select SAMSUNG_DEV_ADC
+ select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_IDE
select SAMSUNG_DEV_KEYPAD
select SAMSUNG_DEV_PWM
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index 0305e9b8282d..ff5cbb30de5b 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -31,7 +31,6 @@
static struct clk s5p_clk_otgphy = {
.name = "otg_phy",
- .id = -1,
};
static struct clk *clk_src_mout_href_list[] = {
@@ -47,7 +46,6 @@ static struct clksrc_sources clk_src_mout_href = {
static struct clksrc_clk clk_mout_href = {
.clk = {
.name = "mout_href",
- .id = -1,
},
.sources = &clk_src_mout_href,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 1 },
@@ -66,7 +64,6 @@ static struct clksrc_sources clk_src_mout_48m = {
static struct clksrc_clk clk_mout_48m = {
.clk = {
.name = "mout_48m",
- .id = -1,
},
.sources = &clk_src_mout_48m,
.reg_src = { .reg = S5P_CLK_SRC1, .shift = 24, .size = 1 },
@@ -75,7 +72,6 @@ static struct clksrc_clk clk_mout_48m = {
static struct clksrc_clk clk_mout_mpll = {
.clk = {
.name = "mout_mpll",
- .id = -1,
},
.sources = &clk_src_mpll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
@@ -85,7 +81,6 @@ static struct clksrc_clk clk_mout_mpll = {
static struct clksrc_clk clk_mout_apll = {
.clk = {
.name = "mout_apll",
- .id = -1,
},
.sources = &clk_src_apll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
@@ -94,7 +89,6 @@ static struct clksrc_clk clk_mout_apll = {
static struct clksrc_clk clk_mout_epll = {
.clk = {
.name = "mout_epll",
- .id = -1,
},
.sources = &clk_src_epll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
@@ -112,7 +106,6 @@ static struct clksrc_sources clk_src_mout_hpll = {
static struct clksrc_clk clk_mout_hpll = {
.clk = {
.name = "mout_hpll",
- .id = -1,
},
.sources = &clk_src_mout_hpll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 },
@@ -121,7 +114,6 @@ static struct clksrc_clk clk_mout_hpll = {
static struct clksrc_clk clk_div_apll = {
.clk = {
.name = "div_apll",
- .id = -1,
.parent = &clk_mout_apll.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 1 },
@@ -130,7 +122,6 @@ static struct clksrc_clk clk_div_apll = {
static struct clksrc_clk clk_div_arm = {
.clk = {
.name = "div_arm",
- .id = -1,
.parent = &clk_div_apll.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 },
@@ -139,7 +130,6 @@ static struct clksrc_clk clk_div_arm = {
static struct clksrc_clk clk_div_d0_bus = {
.clk = {
.name = "div_d0_bus",
- .id = -1,
.parent = &clk_div_arm.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 3 },
@@ -148,7 +138,6 @@ static struct clksrc_clk clk_div_d0_bus = {
static struct clksrc_clk clk_div_pclkd0 = {
.clk = {
.name = "div_pclkd0",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 3 },
@@ -157,7 +146,6 @@ static struct clksrc_clk clk_div_pclkd0 = {
static struct clksrc_clk clk_div_secss = {
.clk = {
.name = "div_secss",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 3 },
@@ -166,7 +154,6 @@ static struct clksrc_clk clk_div_secss = {
static struct clksrc_clk clk_div_apll2 = {
.clk = {
.name = "div_apll2",
- .id = -1,
.parent = &clk_mout_apll.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 3 },
@@ -185,7 +172,6 @@ struct clksrc_sources clk_src_mout_am = {
static struct clksrc_clk clk_mout_am = {
.clk = {
.name = "mout_am",
- .id = -1,
},
.sources = &clk_src_mout_am,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 1 },
@@ -194,7 +180,6 @@ static struct clksrc_clk clk_mout_am = {
static struct clksrc_clk clk_div_d1_bus = {
.clk = {
.name = "div_d1_bus",
- .id = -1,
.parent = &clk_mout_am.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 3 },
@@ -203,7 +188,6 @@ static struct clksrc_clk clk_div_d1_bus = {
static struct clksrc_clk clk_div_mpll2 = {
.clk = {
.name = "div_mpll2",
- .id = -1,
.parent = &clk_mout_am.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 8, .size = 1 },
@@ -212,7 +196,6 @@ static struct clksrc_clk clk_div_mpll2 = {
static struct clksrc_clk clk_div_mpll = {
.clk = {
.name = "div_mpll",
- .id = -1,
.parent = &clk_mout_am.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 4, .size = 2 },
@@ -231,7 +214,6 @@ struct clksrc_sources clk_src_mout_onenand = {
static struct clksrc_clk clk_mout_onenand = {
.clk = {
.name = "mout_onenand",
- .id = -1,
},
.sources = &clk_src_mout_onenand,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 24, .size = 1 },
@@ -240,7 +222,6 @@ static struct clksrc_clk clk_mout_onenand = {
static struct clksrc_clk clk_div_onenand = {
.clk = {
.name = "div_onenand",
- .id = -1,
.parent = &clk_mout_onenand.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 20, .size = 2 },
@@ -249,7 +230,6 @@ static struct clksrc_clk clk_div_onenand = {
static struct clksrc_clk clk_div_pclkd1 = {
.clk = {
.name = "div_pclkd1",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 16, .size = 3 },
@@ -258,7 +238,6 @@ static struct clksrc_clk clk_div_pclkd1 = {
static struct clksrc_clk clk_div_cam = {
.clk = {
.name = "div_cam",
- .id = -1,
.parent = &clk_div_mpll2.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 24, .size = 5 },
@@ -267,7 +246,6 @@ static struct clksrc_clk clk_div_cam = {
static struct clksrc_clk clk_div_hdmi = {
.clk = {
.name = "div_hdmi",
- .id = -1,
.parent = &clk_mout_hpll.clk,
},
.reg_div = { .reg = S5P_CLK_DIV3, .shift = 28, .size = 4 },
@@ -399,367 +377,329 @@ static int s5pc100_sclk1_ctrl(struct clk *clk, int enable)
static struct clk init_clocks_off[] = {
{
.name = "cssys",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_0_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "secss",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_0_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "g2d",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_0_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "mdma",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_0_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "cfcon",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_0_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "nfcon",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_1_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "onenandc",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_1_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "sdm",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_2_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "seckey",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_2_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "hsmmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "hsmmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "hsmmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "modemif",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "otg",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "usbhost",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "pdma",
- .id = 1,
+ .devname = "s3c-pl330.1",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "pdma",
- .id = 0,
+ .devname = "s3c-pl330.0",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "lcd",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_1_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "rotator",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_1_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "fimc",
- .id = 0,
+ .devname = "s5p-fimc.0",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_1_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "fimc",
- .id = 1,
+ .devname = "s5p-fimc.1",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_1_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "fimc",
- .id = 2,
- .parent = &clk_div_d1_bus.clk,
+ .devname = "s5p-fimc.2",
.enable = s5pc100_d1_1_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "jpeg",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_1_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "mipi-dsim",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_1_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "mipi-csis",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_1_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "g3d",
- .id = 0,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_0_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "tv",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_2_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "vp",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_2_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "mixer",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_2_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "hdmi",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_2_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "mfc",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_2_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "apc",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_3_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "iec",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_3_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "systimer",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_3_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_3_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "rtc",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_3_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "i2c",
- .id = 0,
+ .devname = "s3c2440-i2c.0",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "i2c",
- .id = 1,
+ .devname = "s3c2440-i2c.1",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "spi",
- .id = 2,
+ .devname = "s3c64xx-spi.2",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "irda",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "ccan",
- .id = 0,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "ccan",
- .id = 1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "hsitx",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = "hsirx",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 13),
}, {
.name = "iis",
- .id = 0,
+ .devname = "samsung-i2s.0",
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "iis",
- .id = 1,
+ .devname = "samsung-i2s.1",
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "iis",
- .id = 2,
+ .devname = "samsung-i2s.2",
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "ac97",
- .id = -1,
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "pcm",
- .id = 0,
+ .devname = "samsung-pcm.0",
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "pcm",
- .id = 1,
+ .devname = "samsung-pcm.1",
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "spdif",
- .id = -1,
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "adc",
- .id = -1,
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "keypad",
- .id = -1,
.parent = &clk_div_pclkd1.clk,
.enable = s5pc100_d1_5_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "spi_48m",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "spi_48m",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "spi_48m",
- .id = 2,
+ .devname = "s3c64xx-spi.2",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "mmc_48m",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 15),
}, {
.name = "mmc_48m",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "mmc_48m",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 17),
@@ -768,33 +708,27 @@ static struct clk init_clocks_off[] = {
static struct clk clk_vclk54m = {
.name = "vclk_54m",
- .id = -1,
.rate = 54000000,
};
static struct clk clk_i2scdclk0 = {
.name = "i2s_cdclk0",
- .id = -1,
};
static struct clk clk_i2scdclk1 = {
.name = "i2s_cdclk1",
- .id = -1,
};
static struct clk clk_i2scdclk2 = {
.name = "i2s_cdclk2",
- .id = -1,
};
static struct clk clk_pcmcdclk0 = {
.name = "pcm_cdclk0",
- .id = -1,
};
static struct clk clk_pcmcdclk1 = {
.name = "pcm_cdclk1",
- .id = -1,
};
static struct clk *clk_src_group1_list[] = {
@@ -836,7 +770,7 @@ struct clksrc_sources clk_src_group3 = {
static struct clksrc_clk clk_sclk_audio0 = {
.clk = {
.name = "sclk_audio",
- .id = 0,
+ .devname = "samsung-pcm.0",
.ctrlbit = (1 << 8),
.enable = s5pc100_sclk1_ctrl,
},
@@ -862,7 +796,7 @@ struct clksrc_sources clk_src_group4 = {
static struct clksrc_clk clk_sclk_audio1 = {
.clk = {
.name = "sclk_audio",
- .id = 1,
+ .devname = "samsung-pcm.1",
.ctrlbit = (1 << 9),
.enable = s5pc100_sclk1_ctrl,
},
@@ -887,7 +821,7 @@ struct clksrc_sources clk_src_group5 = {
static struct clksrc_clk clk_sclk_audio2 = {
.clk = {
.name = "sclk_audio",
- .id = 2,
+ .devname = "samsung-pcm.2",
.ctrlbit = (1 << 10),
.enable = s5pc100_sclk1_ctrl,
},
@@ -976,48 +910,12 @@ struct clksrc_sources clk_src_sclk_spdif = {
.nr_sources = ARRAY_SIZE(clk_sclk_spdif_list),
};
-static int s5pc100_spdif_set_rate(struct clk *clk, unsigned long rate)
-{
- struct clk *pclk;
- int ret;
-
- pclk = clk_get_parent(clk);
- if (IS_ERR(pclk))
- return -EINVAL;
-
- ret = pclk->ops->set_rate(pclk, rate);
- clk_put(pclk);
-
- return ret;
-}
-
-static unsigned long s5pc100_spdif_get_rate(struct clk *clk)
-{
- struct clk *pclk;
- int rate;
-
- pclk = clk_get_parent(clk);
- if (IS_ERR(pclk))
- return -EINVAL;
-
- rate = pclk->ops->get_rate(clk);
- clk_put(pclk);
-
- return rate;
-}
-
-static struct clk_ops s5pc100_sclk_spdif_ops = {
- .set_rate = s5pc100_spdif_set_rate,
- .get_rate = s5pc100_spdif_get_rate,
-};
-
static struct clksrc_clk clk_sclk_spdif = {
.clk = {
.name = "sclk_spdif",
- .id = -1,
.ctrlbit = (1 << 11),
.enable = s5pc100_sclk1_ctrl,
- .ops = &s5pc100_sclk_spdif_ops,
+ .ops = &s5p_sclk_spdif_ops,
},
.sources = &clk_src_sclk_spdif,
.reg_src = { .reg = S5P_CLK_SRC3, .shift = 24, .size = 2 },
@@ -1027,7 +925,7 @@ static struct clksrc_clk clksrcs[] = {
{
.clk = {
.name = "sclk_spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.ctrlbit = (1 << 4),
.enable = s5pc100_sclk0_ctrl,
@@ -1038,7 +936,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.ctrlbit = (1 << 5),
.enable = s5pc100_sclk0_ctrl,
@@ -1049,7 +947,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 2,
+ .devname = "s3c64xx-spi.2",
.ctrlbit = (1 << 6),
.enable = s5pc100_sclk0_ctrl,
@@ -1060,7 +958,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = -1,
.ctrlbit = (1 << 3),
.enable = s5pc100_sclk0_ctrl,
@@ -1071,7 +968,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mixer",
- .id = -1,
.ctrlbit = (1 << 6),
.enable = s5pc100_sclk0_ctrl,
@@ -1081,7 +977,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_lcd",
- .id = -1,
.ctrlbit = (1 << 0),
.enable = s5pc100_sclk1_ctrl,
@@ -1092,7 +987,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 0,
+ .devname = "s5p-fimc.0",
.ctrlbit = (1 << 1),
.enable = s5pc100_sclk1_ctrl,
@@ -1103,7 +998,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 1,
+ .devname = "s5p-fimc.1",
.ctrlbit = (1 << 2),
.enable = s5pc100_sclk1_ctrl,
@@ -1114,7 +1009,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 2,
+ .devname = "s5p-fimc.2",
.ctrlbit = (1 << 3),
.enable = s5pc100_sclk1_ctrl,
@@ -1125,7 +1020,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.ctrlbit = (1 << 12),
.enable = s5pc100_sclk1_ctrl,
@@ -1136,7 +1031,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.ctrlbit = (1 << 13),
.enable = s5pc100_sclk1_ctrl,
@@ -1147,7 +1042,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.ctrlbit = (1 << 14),
.enable = s5pc100_sclk1_ctrl,
@@ -1158,7 +1053,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_irda",
- .id = 2,
.ctrlbit = (1 << 10),
.enable = s5pc100_sclk0_ctrl,
@@ -1169,7 +1063,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_irda",
- .id = -1,
.ctrlbit = (1 << 10),
.enable = s5pc100_sclk0_ctrl,
@@ -1180,7 +1073,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_pwi",
- .id = -1,
.ctrlbit = (1 << 1),
.enable = s5pc100_sclk0_ctrl,
@@ -1191,7 +1083,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_uhost",
- .id = -1,
.ctrlbit = (1 << 11),
.enable = s5pc100_sclk0_ctrl,
@@ -1291,79 +1182,70 @@ void __init_or_cpufreq s5pc100_setup_clocks(void)
static struct clk init_clocks[] = {
{
.name = "tzic",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_0_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "intc",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_0_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "ebi",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_1_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "intmem",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_1_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "sromc",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_1_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "dmc",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_1_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "chipid",
- .id = -1,
.parent = &clk_div_d0_bus.clk,
.enable = s5pc100_d0_1_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "gpio",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_3_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "uart",
- .id = 0,
+ .devname = "s3c6400-uart.0",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = "uart",
- .id = 1,
+ .devname = "s3c6400-uart.1",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "uart",
- .id = 2,
+ .devname = "s3c6400-uart.2",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = "uart",
- .id = 3,
+ .devname = "s3c6400-uart.3",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "timers",
- .id = -1,
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_3_ctrl,
.ctrlbit = (1 << 6),
diff --git a/arch/arm/mach-s5pc100/include/mach/clkdev.h b/arch/arm/mach-s5pc100/include/mach/clkdev.h
new file mode 100644
index 000000000000..7dffa83d23ff
--- /dev/null
+++ b/arch/arm/mach-s5pc100/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_CLKDEV_H__
+#define __MACH_CLKDEV_H__
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do {} while (0)
+
+#endif
diff --git a/arch/arm/mach-s5pc100/include/mach/regs-fb.h b/arch/arm/mach-s5pc100/include/mach/regs-fb.h
deleted file mode 100644
index 07aa4d6054fe..000000000000
--- a/arch/arm/mach-s5pc100/include/mach/regs-fb.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* arch/arm/mach-s5pc100/include/mach/regs-fb.h
- *
- * Copyright 2009 Samsung Electronics Co.
- * Pawel Osciak <p.osciak@samsung.com>
- *
- * Framebuffer register definitions for Samsung S5PC100.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_FB_H
-#define __ASM_ARCH_REGS_FB_H __FILE__
-
-#include <plat/regs-fb-v4.h>
-
-/* VP1 interface timing control */
-#define VP1CON0 (0x118)
-#define VP1_RATECON_EN (1 << 31)
-#define VP1_CLKRATE_MASK (0xff)
-
-#define VP1CON1 (0x11c)
-#define VP1_VTREGCON_EN (1 << 31)
-#define VP1_VBPD_MASK (0xfff)
-#define VP1_VBPD_SHIFT (16)
-
-
-#define WPALCON_H (0x19c)
-#define WPALCON_L (0x1a0)
-
-/* Palette control for WPAL0 and WPAL1 is the same as in S3C64xx, but
- * different for WPAL2-4
- */
-/* In WPALCON_L (aka WPALCON) */
-#define WPALCON_W1PAL_32BPP_A888 (0x7 << 3)
-#define WPALCON_W0PAL_32BPP_A888 (0x7 << 0)
-
-/* To set W2PAL-W4PAL consist of one bit from WPALCON_L and two from WPALCON_H,
- * e.g. W2PAL[2..0] is made of (WPALCON_H[10..9], WPALCON_L[6]).
- */
-#define WPALCON_L_WxPAL_L_MASK (0x1)
-#define WPALCON_L_W2PAL_L_SHIFT (6)
-#define WPALCON_L_W3PAL_L_SHIFT (7)
-#define WPALCON_L_W4PAL_L_SHIFT (8)
-
-#define WPALCON_L_WxPAL_H_MASK (0x3)
-#define WPALCON_H_W2PAL_H_SHIFT (9)
-#define WPALCON_H_W3PAL_H_SHIFT (13)
-#define WPALCON_H_W4PAL_H_SHIFT (17)
-
-/* Per-window alpha value registers */
-/* For window 0 8-bit alpha values are in VIDW0ALPHAx,
- * for windows 1-4 alpha values consist of two parts, the 4 low bits are
- * taken from VIDWxALPHAx and 4 high bits are from VIDOSDxC,
- * e.g. WIN1_ALPHA0_B[7..0] = (VIDOSD1C[3..0], VIDW1ALPHA0[3..0])
- */
-#define VIDWxALPHA0(_win) (0x200 + (_win * 8))
-#define VIDWxALPHA1(_win) (0x204 + (_win * 8))
-
-/* Only for window 0 in VIDW0ALPHAx. */
-#define VIDW0ALPHAx_R(_x) ((_x) << 16)
-#define VIDW0ALPHAx_R_MASK (0xff << 16)
-#define VIDW0ALPHAx_R_SHIFT (16)
-#define VIDW0ALPHAx_G(_x) ((_x) << 8)
-#define VIDW0ALPHAx_G_MASK (0xff << 8)
-#define VIDW0ALPHAx_G_SHIFT (8)
-#define VIDW0ALPHAx_B(_x) ((_x) << 0)
-#define VIDW0ALPHAx_B_MASK (0xff << 0)
-#define VIDW0ALPHAx_B_SHIFT (0)
-
-/* Low 4 bits of alpha0-1 for windows 1-4 */
-#define VIDW14ALPHAx_R_L(_x) ((_x) << 16)
-#define VIDW14ALPHAx_R_L_MASK (0xf << 16)
-#define VIDW14ALPHAx_R_L_SHIFT (16)
-#define VIDW14ALPHAx_G_L(_x) ((_x) << 8)
-#define VIDW14ALPHAx_G_L_MASK (0xf << 8)
-#define VIDW14ALPHAx_G_L_SHIFT (8)
-#define VIDW14ALPHAx_B_L(_x) ((_x) << 0)
-#define VIDW14ALPHAx_B_L_MASK (0xf << 0)
-#define VIDW14ALPHAx_B_L_SHIFT (0)
-
-
-/* Per-window blending equation control registers */
-#define BLENDEQx(_win) (0x244 + ((_win) * 4))
-#define BLENDEQ1 (0x244)
-#define BLENDEQ2 (0x248)
-#define BLENDEQ3 (0x24c)
-#define BLENDEQ4 (0x250)
-
-#define BLENDEQx_Q_FUNC(_x) ((_x) << 18)
-#define BLENDEQx_Q_FUNC_MASK (0xf << 18)
-#define BLENDEQx_P_FUNC(_x) ((_x) << 12)
-#define BLENDEQx_P_FUNC_MASK (0xf << 12)
-#define BLENDEQx_B_FUNC(_x) ((_x) << 6)
-#define BLENDEQx_B_FUNC_MASK (0xf << 6)
-#define BLENDEQx_A_FUNC(_x) ((_x) << 0)
-#define BLENDEQx_A_FUNC_MASK (0xf << 0)
-
-#define BLENDCON (0x260)
-#define BLENDCON_8BIT_ALPHA (1 << 0)
-
-
-#endif /* __ASM_ARCH_REGS_FB_H */
-
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 0525cb3ef406..227d8908aab6 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -29,7 +29,6 @@
#include <asm/mach/map.h>
#include <mach/map.h>
-#include <mach/regs-fb.h>
#include <mach/regs-gpio.h>
#include <video/platform_lcd.h>
@@ -51,6 +50,8 @@
#include <plat/keypad.h>
#include <plat/ts.h>
#include <plat/audio.h>
+#include <plat/backlight.h>
+#include <plat/regs-fb-v4.h>

/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKC100_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -179,45 +180,6 @@ static struct samsung_keypad_platdata smdkc100_keypad_data __initdata = {
.cols = 8,
};

-static int smdkc100_backlight_init(struct device *dev)
-{
- int ret;
-
- ret = gpio_request(S5PC100_GPD(0), "Backlight");
- if (ret) {
- printk(KERN_ERR "failed to request GPF for PWM-OUT0\n");
- return ret;
- }
-
- /* Configure GPIO pin with S5PC100_GPD_TOUT_0 */
- s3c_gpio_cfgpin(S5PC100_GPD(0), S3C_GPIO_SFN(2));
-
- return 0;
-}
-
-static void smdkc100_backlight_exit(struct device *dev)
-{
- s3c_gpio_cfgpin(S5PC100_GPD(0), S3C_GPIO_OUTPUT);
- gpio_free(S5PC100_GPD(0));
-}
-
-static struct platform_pwm_backlight_data smdkc100_backlight_data = {
- .pwm_id = 0,
- .max_brightness = 255,
- .dft_brightness = 255,
- .pwm_period_ns = 78770,
- .init = smdkc100_backlight_init,
- .exit = smdkc100_backlight_exit,
-};
-
-static struct platform_device smdkc100_backlight_device = {
- .name = "pwm-backlight",
- .dev = {
- .parent = &s3c_device_timer[0].dev,
- .platform_data = &smdkc100_backlight_data,
- },
-};
-
static struct platform_device *smdkc100_devices[] __initdata = {
&s3c_device_adc,
&s3c_device_cfcon,
@@ -239,8 +201,6 @@ static struct platform_device *smdkc100_devices[] __initdata = {
&s5p_device_fimc1,
&s5p_device_fimc2,
&s5pc100_device_spdif,
- &s3c_device_timer[0],
- &smdkc100_backlight_device,
};

static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
@@ -249,6 +209,16 @@ static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
.oversampling_shift = 2,
};

+/* LCD Backlight data */
+static struct samsung_bl_gpio_info smdkc100_bl_gpio_info = {
+ .no = S5PC100_GPD(0),
+ .func = S3C_GPIO_SFN(2),
+};
+
+static struct platform_pwm_backlight_data smdkc100_bl_data = {
+ .pwm_id = 0,
+};
+
static void __init smdkc100_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -276,6 +246,9 @@ static void __init smdkc100_machine_init(void)
/* LCD init */
gpio_request(S5PC100_GPH0(6), "GPH0");
smdkc100_lcd_power_set(&smdkc100_lcd_power_data, 0);
+
+ samsung_bl_set(&smdkc100_bl_gpio_info, &smdkc100_bl_data);
+
platform_add_devices(smdkc100_devices, ARRAY_SIZE(smdkc100_devices));
}
diff --git a/arch/arm/mach-s5pc100/setup-fb-24bpp.c b/arch/arm/mach-s5pc100/setup-fb-24bpp.c
index d31c0f3fe222..8978e4cf9ed5 100644
--- a/arch/arm/mach-s5pc100/setup-fb-24bpp.c
+++ b/arch/arm/mach-s5pc100/setup-fb-24bpp.c
@@ -15,7 +15,6 @@
#include <linux/fb.h>
#include <linux/gpio.h>
-#include <mach/regs-fb.h>
#include <mach/map.h>
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 37b5a97594a5..79bb3a0314ef 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -134,6 +134,7 @@ config MACH_SMDKV210
select S3C_DEV_RTC
select S3C_DEV_WDT
select SAMSUNG_DEV_ADC
+ select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_IDE
select SAMSUNG_DEV_KEYPAD
select SAMSUNG_DEV_PWM
diff --git a/arch/arm/mach-s5pv210/Makefile b/arch/arm/mach-s5pv210/Makefile
index 50907aca006c..599a3c0e8f6c 100644
--- a/arch/arm/mach-s5pv210/Makefile
+++ b/arch/arm/mach-s5pv210/Makefile
@@ -15,7 +15,6 @@ obj- :=
obj-$(CONFIG_CPU_S5PV210) += cpu.o init.o clock.o dma.o
obj-$(CONFIG_CPU_S5PV210) += setup-i2c0.o
obj-$(CONFIG_S5PV210_PM) += pm.o sleep.o
-obj-$(CONFIG_CPU_FREQ) += cpufreq.o
# machine support
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 2d599499cefe..ae72f87eab15 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -36,7 +36,6 @@ static unsigned long xtal;
static struct clksrc_clk clk_mout_apll = {
.clk = {
.name = "mout_apll",
- .id = -1,
},
.sources = &clk_src_apll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 },
@@ -45,7 +44,6 @@ static struct clksrc_clk clk_mout_apll = {
static struct clksrc_clk clk_mout_epll = {
.clk = {
.name = "mout_epll",
- .id = -1,
},
.sources = &clk_src_epll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 },
@@ -54,7 +52,6 @@ static struct clksrc_clk clk_mout_epll = {
static struct clksrc_clk clk_mout_mpll = {
.clk = {
.name = "mout_mpll",
- .id = -1,
},
.sources = &clk_src_mpll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 },
@@ -73,7 +70,6 @@ static struct clksrc_sources clkset_armclk = {
static struct clksrc_clk clk_armclk = {
.clk = {
.name = "armclk",
- .id = -1,
},
.sources = &clkset_armclk,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 1 },
@@ -83,7 +79,6 @@ static struct clksrc_clk clk_armclk = {
static struct clksrc_clk clk_hclk_msys = {
.clk = {
.name = "hclk_msys",
- .id = -1,
.parent = &clk_armclk.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 3 },
@@ -92,7 +87,6 @@ static struct clksrc_clk clk_hclk_msys = {
static struct clksrc_clk clk_pclk_msys = {
.clk = {
.name = "pclk_msys",
- .id = -1,
.parent = &clk_hclk_msys.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 3 },
@@ -101,7 +95,6 @@ static struct clksrc_clk clk_pclk_msys = {
static struct clksrc_clk clk_sclk_a2m = {
.clk = {
.name = "sclk_a2m",
- .id = -1,
.parent = &clk_mout_apll.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 },
@@ -120,7 +113,6 @@ static struct clksrc_sources clkset_hclk_sys = {
static struct clksrc_clk clk_hclk_dsys = {
.clk = {
.name = "hclk_dsys",
- .id = -1,
},
.sources = &clkset_hclk_sys,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 1 },
@@ -130,7 +122,6 @@ static struct clksrc_clk clk_hclk_dsys = {
static struct clksrc_clk clk_pclk_dsys = {
.clk = {
.name = "pclk_dsys",
- .id = -1,
.parent = &clk_hclk_dsys.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 20, .size = 3 },
@@ -139,7 +130,6 @@ static struct clksrc_clk clk_pclk_dsys = {
static struct clksrc_clk clk_hclk_psys = {
.clk = {
.name = "hclk_psys",
- .id = -1,
},
.sources = &clkset_hclk_sys,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 24, .size = 1 },
@@ -149,7 +139,6 @@ static struct clksrc_clk clk_hclk_psys = {
static struct clksrc_clk clk_pclk_psys = {
.clk = {
.name = "pclk_psys",
- .id = -1,
.parent = &clk_hclk_psys.clk,
},
.reg_div = { .reg = S5P_CLK_DIV0, .shift = 28, .size = 3 },
@@ -187,38 +176,31 @@ static int s5pv210_clk_mask1_ctrl(struct clk *clk, int enable)
static struct clk clk_sclk_hdmi27m = {
.name = "sclk_hdmi27m",
- .id = -1,
.rate = 27000000,
};
static struct clk clk_sclk_hdmiphy = {
.name = "sclk_hdmiphy",
- .id = -1,
};
static struct clk clk_sclk_usbphy0 = {
.name = "sclk_usbphy0",
- .id = -1,
};
static struct clk clk_sclk_usbphy1 = {
.name = "sclk_usbphy1",
- .id = -1,
};
static struct clk clk_pcmcdclk0 = {
.name = "pcmcdclk",
- .id = -1,
};
static struct clk clk_pcmcdclk1 = {
.name = "pcmcdclk",
- .id = -1,
};
static struct clk clk_pcmcdclk2 = {
.name = "pcmcdclk",
- .id = -1,
};
static struct clk *clkset_vpllsrc_list[] = {
@@ -234,7 +216,6 @@ static struct clksrc_sources clkset_vpllsrc = {
static struct clksrc_clk clk_vpllsrc = {
.clk = {
.name = "vpll_src",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 7),
},
@@ -255,7 +236,6 @@ static struct clksrc_sources clkset_sclk_vpll = {
static struct clksrc_clk clk_sclk_vpll = {
.clk = {
.name = "sclk_vpll",
- .id = -1,
},
.sources = &clkset_sclk_vpll,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 },
@@ -276,7 +256,6 @@ static struct clksrc_sources clkset_moutdmc0src = {
static struct clksrc_clk clk_mout_dmc0 = {
.clk = {
.name = "mout_dmc0",
- .id = -1,
},
.sources = &clkset_moutdmc0src,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 },
@@ -285,7 +264,6 @@ static struct clksrc_clk clk_mout_dmc0 = {
static struct clksrc_clk clk_sclk_dmc0 = {
.clk = {
.name = "sclk_dmc0",
- .id = -1,
.parent = &clk_mout_dmc0.clk,
},
.reg_div = { .reg = S5P_CLK_DIV6, .shift = 28, .size = 4 },
@@ -312,181 +290,169 @@ static struct clk_ops clk_fout_apll_ops = {
static struct clk init_clocks_off[] = {
{
.name = "pdma",
- .id = 0,
+ .devname = "s3c-pl330.0",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "pdma",
- .id = 1,
+ .devname = "s3c-pl330.1",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "rot",
- .id = -1,
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1<<29),
}, {
.name = "fimc",
- .id = 0,
+ .devname = "s5pv210-fimc.0",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 24),
}, {
.name = "fimc",
- .id = 1,
+ .devname = "s5pv210-fimc.1",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 25),
}, {
.name = "fimc",
- .id = 2,
+ .devname = "s5pv210-fimc.2",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 26),
}, {
.name = "otg",
- .id = -1,
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<16),
}, {
.name = "usb-host",
- .id = -1,
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<17),
}, {
.name = "lcd",
- .id = -1,
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<0),
}, {
.name = "cfcon",
- .id = 0,
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1<<25),
}, {
.name = "hsmmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<16),
}, {
.name = "hsmmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<17),
}, {
.name = "hsmmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<18),
}, {
.name = "hsmmc",
- .id = 3,
+ .devname = "s3c-sdhci.3",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip2_ctrl,
.ctrlbit = (1<<19),
}, {
.name = "systimer",
- .id = -1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<16),
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<22),
}, {
.name = "rtc",
- .id = -1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<15),
}, {
.name = "i2c",
- .id = 0,
+ .devname = "s3c2440-i2c.0",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<7),
}, {
.name = "i2c",
- .id = 1,
+ .devname = "s3c2440-i2c.1",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "i2c",
- .id = 2,
+ .devname = "s3c2440-i2c.2",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<9),
}, {
.name = "spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<12),
}, {
.name = "spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<13),
}, {
.name = "spi",
- .id = 2,
+ .devname = "s3c64xx-spi.2",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<14),
}, {
.name = "timers",
- .id = -1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<23),
}, {
.name = "adc",
- .id = -1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<24),
}, {
.name = "keypad",
- .id = -1,
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<21),
}, {
.name = "iis",
- .id = 0,
+ .devname = "samsung-i2s.0",
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<4),
}, {
.name = "iis",
- .id = 1,
+ .devname = "samsung-i2s.1",
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 5),
}, {
.name = "iis",
- .id = 2,
+ .devname = "samsung-i2s.2",
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "spdif",
- .id = -1,
.parent = &clk_p,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 0),
@@ -496,38 +462,36 @@ static struct clk init_clocks_off[] = {
static struct clk init_clocks[] = {
{
.name = "hclk_imem",
- .id = -1,
.parent = &clk_hclk_msys.clk,
.ctrlbit = (1 << 5),
.enable = s5pv210_clk_ip0_ctrl,
.ops = &clk_hclk_imem_ops,
}, {
.name = "uart",
- .id = 0,
+ .devname = "s5pv210-uart.0",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "uart",
- .id = 1,
+ .devname = "s5pv210-uart.1",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "uart",
- .id = 2,
+ .devname = "s5pv210-uart.2",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 19),
}, {
.name = "uart",
- .id = 3,
+ .devname = "s5pv210-uart.3",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1 << 20),
}, {
.name = "sromc",
- .id = -1,
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip1_ctrl,
.ctrlbit = (1 << 26),
@@ -579,7 +543,6 @@ static struct clksrc_sources clkset_sclk_dac = {
static struct clksrc_clk clk_sclk_dac = {
.clk = {
.name = "sclk_dac",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 2),
},
@@ -590,7 +553,6 @@ static struct clksrc_clk clk_sclk_dac = {
static struct clksrc_clk clk_sclk_pixel = {
.clk = {
.name = "sclk_pixel",
- .id = -1,
.parent = &clk_sclk_vpll.clk,
},
.reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 4},
@@ -609,7 +571,6 @@ static struct clksrc_sources clkset_sclk_hdmi = {
static struct clksrc_clk clk_sclk_hdmi = {
.clk = {
.name = "sclk_hdmi",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 0),
},
@@ -647,7 +608,7 @@ static struct clksrc_sources clkset_sclk_audio0 = {
static struct clksrc_clk clk_sclk_audio0 = {
.clk = {
.name = "sclk_audio",
- .id = 0,
+ .devname = "soc-audio.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 24),
},
@@ -676,7 +637,7 @@ static struct clksrc_sources clkset_sclk_audio1 = {
static struct clksrc_clk clk_sclk_audio1 = {
.clk = {
.name = "sclk_audio",
- .id = 1,
+ .devname = "soc-audio.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 25),
},
@@ -705,7 +666,7 @@ static struct clksrc_sources clkset_sclk_audio2 = {
static struct clksrc_clk clk_sclk_audio2 = {
.clk = {
.name = "sclk_audio",
- .id = 2,
+ .devname = "soc-audio.2",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 26),
},
@@ -725,48 +686,12 @@ static struct clksrc_sources clkset_sclk_spdif = {
.nr_sources = ARRAY_SIZE(clkset_sclk_spdif_list),
};
-static int s5pv210_spdif_set_rate(struct clk *clk, unsigned long rate)
-{
- struct clk *pclk;
- int ret;
-
- pclk = clk_get_parent(clk);
- if (IS_ERR(pclk))
- return -EINVAL;
-
- ret = pclk->ops->set_rate(pclk, rate);
- clk_put(pclk);
-
- return ret;
-}
-
-static unsigned long s5pv210_spdif_get_rate(struct clk *clk)
-{
- struct clk *pclk;
- int rate;
-
- pclk = clk_get_parent(clk);
- if (IS_ERR(pclk))
- return -EINVAL;
-
- rate = pclk->ops->get_rate(clk);
- clk_put(pclk);
-
- return rate;
-}
-
-static struct clk_ops s5pv210_sclk_spdif_ops = {
- .set_rate = s5pv210_spdif_set_rate,
- .get_rate = s5pv210_spdif_get_rate,
-};
-
static struct clksrc_clk clk_sclk_spdif = {
.clk = {
.name = "sclk_spdif",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 27),
- .ops = &s5pv210_sclk_spdif_ops,
+ .ops = &s5p_sclk_spdif_ops,
},
.sources = &clkset_sclk_spdif,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 12, .size = 2 },
@@ -793,7 +718,6 @@ static struct clksrc_clk clksrcs[] = {
{
.clk = {
.name = "sclk_dmc",
- .id = -1,
},
.sources = &clkset_group1,
.reg_src = { .reg = S5P_CLK_SRC6, .shift = 24, .size = 2 },
@@ -801,7 +725,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_onenand",
- .id = -1,
},
.sources = &clkset_sclk_onenand,
.reg_src = { .reg = S5P_CLK_SRC0, .shift = 28, .size = 1 },
@@ -809,7 +732,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = 0,
+ .devname = "s5pv210-uart.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 12),
},
@@ -819,7 +742,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = 1,
+ .devname = "s5pv210-uart.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 13),
},
@@ -829,7 +752,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = 2,
+ .devname = "s5pv210-uart.2",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 14),
},
@@ -839,7 +762,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "uclk1",
- .id = 3,
+ .devname = "s5pv210-uart.3",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 15),
},
@@ -849,7 +772,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mixer",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 1),
},
@@ -858,7 +780,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 0,
+ .devname = "s5pv210-fimc.0",
.enable = s5pv210_clk_mask1_ctrl,
.ctrlbit = (1 << 2),
},
@@ -868,7 +790,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 1,
+ .devname = "s5pv210-fimc.1",
.enable = s5pv210_clk_mask1_ctrl,
.ctrlbit = (1 << 3),
},
@@ -878,7 +800,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimc",
- .id = 2,
+ .devname = "s5pv210-fimc.2",
.enable = s5pv210_clk_mask1_ctrl,
.ctrlbit = (1 << 4),
},
@@ -888,7 +810,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_cam",
- .id = 0,
+ .devname = "s5pv210-fimc.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 3),
},
@@ -898,7 +820,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_cam",
- .id = 1,
+ .devname = "s5pv210-fimc.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 4),
},
@@ -908,7 +830,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_fimd",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 5),
},
@@ -918,7 +839,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 0,
+ .devname = "s3c-sdhci.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 8),
},
@@ -928,7 +849,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 1,
+ .devname = "s3c-sdhci.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 9),
},
@@ -938,7 +859,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 2,
+ .devname = "s3c-sdhci.2",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 10),
},
@@ -948,7 +869,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mmc",
- .id = 3,
+ .devname = "s3c-sdhci.3",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 11),
},
@@ -958,7 +879,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_mfc",
- .id = -1,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 16),
},
@@ -968,7 +888,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_g2d",
- .id = -1,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 12),
},
@@ -978,7 +897,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_g3d",
- .id = -1,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 8),
},
@@ -988,7 +906,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_csis",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 6),
},
@@ -998,7 +915,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 0,
+ .devname = "s3c64xx-spi.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 16),
},
@@ -1008,7 +925,7 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_spi",
- .id = 1,
+ .devname = "s3c64xx-spi.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 17),
},
@@ -1018,7 +935,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_pwi",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 29),
},
@@ -1028,7 +944,6 @@ static struct clksrc_clk clksrcs[] = {
}, {
.clk = {
.name = "sclk_pwm",
- .id = -1,
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 19),
},
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
deleted file mode 100644
index 153af8b359ec..000000000000
--- a/arch/arm/mach-s5pv210/cpufreq.c
+++ /dev/null
@@ -1,485 +0,0 @@
-/* linux/arch/arm/mach-s5pv210/cpufreq.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * CPU frequency scaling for S5PC110/S5PV210
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/cpufreq.h>
-
-#include <mach/map.h>
-#include <mach/regs-clock.h>
-
-static struct clk *cpu_clk;
-static struct clk *dmc0_clk;
-static struct clk *dmc1_clk;
-static struct cpufreq_freqs freqs;
-
-/* APLL M,P,S values for 1G/800Mhz */
-#define APLL_VAL_1000 ((1 << 31) | (125 << 16) | (3 << 8) | 1)
-#define APLL_VAL_800 ((1 << 31) | (100 << 16) | (3 << 8) | 1)
-
-/*
- * DRAM configurations to calculate refresh counter for changing
- * frequency of memory.
- */
-struct dram_conf {
- unsigned long freq; /* HZ */
- unsigned long refresh; /* DRAM refresh counter * 1000 */
-};
-
-/* DRAM configuration (DMC0 and DMC1) */
-static struct dram_conf s5pv210_dram_conf[2];
-
-enum perf_level {
- L0, L1, L2, L3, L4,
-};
-
-enum s5pv210_mem_type {
- LPDDR = 0x1,
- LPDDR2 = 0x2,
- DDR2 = 0x4,
-};
-
-enum s5pv210_dmc_port {
- DMC0 = 0,
- DMC1,
-};
-
-static struct cpufreq_frequency_table s5pv210_freq_table[] = {
- {L0, 1000*1000},
- {L1, 800*1000},
- {L2, 400*1000},
- {L3, 200*1000},
- {L4, 100*1000},
- {0, CPUFREQ_TABLE_END},
-};
-
-static u32 clkdiv_val[5][11] = {
- /*
- * Clock divider value for following
- * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
- * HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
- * ONEDRAM, MFC, G3D }
- */
-
- /* L0 : [1000/200/100][166/83][133/66][200/200] */
- {0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
-
- /* L1 : [800/200/100][166/83][133/66][200/200] */
- {0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},
-
- /* L2 : [400/200/100][166/83][133/66][200/200] */
- {1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
-
- /* L3 : [200/200/100][166/83][133/66][200/200] */
- {3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
-
- /* L4 : [100/100/100][83/83][66/66][100/100] */
- {7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
-};
-
-/*
- * This function set DRAM refresh counter
- * accoriding to operating frequency of DRAM
- * ch: DMC port number 0 or 1
- * freq: Operating frequency of DRAM(KHz)
- */
-static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
-{
- unsigned long tmp, tmp1;
- void __iomem *reg = NULL;
-
- if (ch == DMC0) {
- reg = (S5P_VA_DMC0 + 0x30);
- } else if (ch == DMC1) {
- reg = (S5P_VA_DMC1 + 0x30);
- } else {
- printk(KERN_ERR "Cannot find DMC port\n");
- return;
- }
-
- /* Find current DRAM frequency */
- tmp = s5pv210_dram_conf[ch].freq;
-
- do_div(tmp, freq);
-
- tmp1 = s5pv210_dram_conf[ch].refresh;
-
- do_div(tmp1, tmp);
-
- __raw_writel(tmp1, reg);
-}
-
-int s5pv210_verify_speed(struct cpufreq_policy *policy)
-{
- if (policy->cpu)
- return -EINVAL;
-
- return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
-}
-
-unsigned int s5pv210_getspeed(unsigned int cpu)
-{
- if (cpu)
- return 0;
-
- return clk_get_rate(cpu_clk) / 1000;
-}
-
-static int s5pv210_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned long reg;
- unsigned int index, priv_index;
- unsigned int pll_changing = 0;
- unsigned int bus_speed_changing = 0;
-
- freqs.old = s5pv210_getspeed(0);
-
- if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- target_freq, relation, &index))
- return -EINVAL;
-
- freqs.new = s5pv210_freq_table[index].frequency;
- freqs.cpu = 0;
-
- if (freqs.new == freqs.old)
- return 0;
-
- /* Finding current running level index */
- if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- freqs.old, relation, &priv_index))
- return -EINVAL;
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- if (freqs.new > freqs.old) {
- /* Voltage up: will be implemented */
- }
-
- /* Check if there need to change PLL */
- if ((index == L0) || (priv_index == L0))
- pll_changing = 1;
-
- /* Check if there need to change System bus clock */
- if ((index == L4) || (priv_index == L4))
- bus_speed_changing = 1;
-
- if (bus_speed_changing) {
- /*
- * Reconfigure DRAM refresh counter value for minimum
- * temporary clock while changing divider.
- * expected clock is 83Mhz : 7.8usec/(1/83Mhz) = 0x287
- */
- if (pll_changing)
- s5pv210_set_refresh(DMC1, 83000);
- else
- s5pv210_set_refresh(DMC1, 100000);
-
- s5pv210_set_refresh(DMC0, 83000);
- }
-
- /*
- * APLL should be changed in this level
- * APLL -> MPLL(for stable transition) -> APLL
- * Some clock source's clock API are not prepared.
- * Do not use clock API in below code.
- */
- if (pll_changing) {
- /*
- * 1. Temporary Change divider for MFC and G3D
- * SCLKA2M(200/1=200)->(200/4=50)Mhz
- */
- reg = __raw_readl(S5P_CLK_DIV2);
- reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
- reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
- (3 << S5P_CLKDIV2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV2);
-
- /* For MFC, G3D dividing */
- do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
- } while (reg & ((1 << 16) | (1 << 17)));
-
- /*
- * 2. Change SCLKA2M(200Mhz)to SCLKMPLL in MFC_MUX, G3D MUX
- * (200/4=50)->(667/4=166)Mhz
- */
- reg = __raw_readl(S5P_CLK_SRC2);
- reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
- reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
- (1 << S5P_CLKSRC2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC2);
-
- do {
- reg = __raw_readl(S5P_CLKMUX_STAT1);
- } while (reg & ((1 << 7) | (1 << 3)));
-
- /*
- * 3. DMC1 refresh count for 133Mhz if (index == L4) is
- * true refresh counter is already programed in upper
- * code. 0x287@83Mhz
- */
- if (!bus_speed_changing)
- s5pv210_set_refresh(DMC1, 133000);
-
- /* 4. SCLKAPLL -> SCLKMPLL */
- reg = __raw_readl(S5P_CLK_SRC0);
- reg &= ~(S5P_CLKSRC0_MUX200_MASK);
- reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC0);
-
- do {
- reg = __raw_readl(S5P_CLKMUX_STAT0);
- } while (reg & (0x1 << 18));
-
- }
-
- /* Change divider */
- reg = __raw_readl(S5P_CLK_DIV0);
-
- reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
- S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
- S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
- S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);
-
- reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
- (clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
- (clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
- (clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
- (clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
- (clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
- (clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
- (clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
-
- __raw_writel(reg, S5P_CLK_DIV0);
-
- do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
- } while (reg & 0xff);
-
- /* ARM MCS value changed */
- reg = __raw_readl(S5P_ARM_MCS_CON);
- reg &= ~0x3;
- if (index >= L3)
- reg |= 0x3;
- else
- reg |= 0x1;
-
- __raw_writel(reg, S5P_ARM_MCS_CON);
-
- if (pll_changing) {
- /* 5. Set Lock time = 30us*24Mhz = 0x2cf */
- __raw_writel(0x2cf, S5P_APLL_LOCK);
-
- /*
- * 6. Turn on APLL
- * 6-1. Set PMS values
- * 6-2. Wait untile the PLL is locked
- */
- if (index == L0)
- __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
- else
- __raw_writel(APLL_VAL_800, S5P_APLL_CON);
-
- do {
- reg = __raw_readl(S5P_APLL_CON);
- } while (!(reg & (0x1 << 29)));
-
- /*
- * 7. Change souce clock from SCLKMPLL(667Mhz)
- * to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX
- * (667/4=166)->(200/4=50)Mhz
- */
- reg = __raw_readl(S5P_CLK_SRC2);
- reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
- reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
- (0 << S5P_CLKSRC2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC2);
-
- do {
- reg = __raw_readl(S5P_CLKMUX_STAT1);
- } while (reg & ((1 << 7) | (1 << 3)));
-
- /*
- * 8. Change divider for MFC and G3D
- * (200/4=50)->(200/1=200)Mhz
- */
- reg = __raw_readl(S5P_CLK_DIV2);
- reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
- reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
- (clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV2);
-
- /* For MFC, G3D dividing */
- do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
- } while (reg & ((1 << 16) | (1 << 17)));
-
- /* 9. Change MPLL to APLL in MSYS_MUX */
- reg = __raw_readl(S5P_CLK_SRC0);
- reg &= ~(S5P_CLKSRC0_MUX200_MASK);
- reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC0);
-
- do {
- reg = __raw_readl(S5P_CLKMUX_STAT0);
- } while (reg & (0x1 << 18));
-
- /*
- * 10. DMC1 refresh counter
- * L4 : DMC1 = 100Mhz 7.8us/(1/100) = 0x30c
- * Others : DMC1 = 200Mhz 7.8us/(1/200) = 0x618
- */
- if (!bus_speed_changing)
- s5pv210_set_refresh(DMC1, 200000);
- }
-
- /*
- * L4 level need to change memory bus speed, hence onedram clock divier
- * and memory refresh parameter should be changed
- */
- if (bus_speed_changing) {
- reg = __raw_readl(S5P_CLK_DIV6);
- reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
- reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV6);
-
- do {
- reg = __raw_readl(S5P_CLKDIV_STAT1);
- } while (reg & (1 << 15));
-
- /* Reconfigure DRAM refresh counter value */
- if (index != L4) {
- /*
- * DMC0 : 166Mhz
- * DMC1 : 200Mhz
- */
- s5pv210_set_refresh(DMC0, 166000);
- s5pv210_set_refresh(DMC1, 200000);
- } else {
- /*
- * DMC0 : 83Mhz
- * DMC1 : 100Mhz
- */
- s5pv210_set_refresh(DMC0, 83000);
- s5pv210_set_refresh(DMC1, 100000);
- }
- }
-
- if (freqs.new < freqs.old) {
- /* Voltage down: will be implemented */
- }
-
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- printk(KERN_DEBUG "Perf changed[L%d]\n", index);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
-static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
-{
- return 0;
-}
-#endif
-
-static int check_mem_type(void __iomem *dmc_reg)
-{
- unsigned long val;
-
- val = __raw_readl(dmc_reg + 0x4);
- val = (val & (0xf << 8));
-
- return val >> 8;
-}
-
-static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
-{
- unsigned long mem_type;
-
- cpu_clk = clk_get(NULL, "armclk");
- if (IS_ERR(cpu_clk))
- return PTR_ERR(cpu_clk);
-
- dmc0_clk = clk_get(NULL, "sclk_dmc0");
- if (IS_ERR(dmc0_clk)) {
- clk_put(cpu_clk);
- return PTR_ERR(dmc0_clk);
- }
-
- dmc1_clk = clk_get(NULL, "hclk_msys");
- if (IS_ERR(dmc1_clk)) {
- clk_put(dmc0_clk);
- clk_put(cpu_clk);
- return PTR_ERR(dmc1_clk);
- }
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- /*
- * check_mem_type : This driver only supports LPDDR & LPDDR2;
- * other memory types are not supported.
- */
- mem_type = check_mem_type(S5P_VA_DMC0);
-
- if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
- printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
- return -EINVAL;
- }
-
- /* Find the current refresh counter and frequency for each DMC */
- s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
- s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
-
- s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
- s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
-
- policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
-
- cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
-
- policy->cpuinfo.transition_latency = 40000;
-
- return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
-}
-
-static struct cpufreq_driver s5pv210_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = s5pv210_verify_speed,
- .target = s5pv210_target,
- .get = s5pv210_getspeed,
- .init = s5pv210_cpu_init,
- .name = "s5pv210",
-#ifdef CONFIG_PM
- .suspend = s5pv210_cpufreq_suspend,
- .resume = s5pv210_cpufreq_resume,
-#endif
-};
-
-static int __init s5pv210_cpufreq_init(void)
-{
- return cpufreq_register_driver(&s5pv210_driver);
-}
-
-late_initcall(s5pv210_cpufreq_init);
diff --git a/arch/arm/mach-s5pv210/include/mach/clkdev.h b/arch/arm/mach-s5pv210/include/mach/clkdev.h
new file mode 100644
index 000000000000..7dffa83d23ff
--- /dev/null
+++ b/arch/arm/mach-s5pv210/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_CLKDEV_H__
+#define __MACH_CLKDEV_H__
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do {} while (0)
+
+#endif
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-fb.h b/arch/arm/mach-s5pv210/include/mach/regs-fb.h
deleted file mode 100644
index 60d992989bdc..000000000000
--- a/arch/arm/mach-s5pv210/include/mach/regs-fb.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright 2010 Ben Dooks <ben-linux@fluff.org>
- *
- * Dummy framebuffer to allow build for the moment.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_MACH_REGS_FB_H
-#define __ASM_ARCH_MACH_REGS_FB_H __FILE__
-
-#include <plat/regs-fb-v4.h>
-
-static inline unsigned int s3c_fb_pal_reg(unsigned int window, int reg)
-{
- return 0x2400 + (window * 256 *4 ) + reg;
-}
-
-#endif /* __ASM_ARCH_MACH_REGS_FB_H */
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 4e1d8ff5ae59..509627f25111 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -29,7 +29,6 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/regs-fb.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-serial.h>
@@ -40,6 +39,7 @@
#include <plat/fimc-core.h>
#include <plat/sdhci.h>
#include <plat/s5p-time.h>
+#include <plat/regs-fb-v4.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define AQUILA_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 31d5aa769753..e0c4d06b9db6 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -34,7 +34,6 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/regs-fb.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-serial.h>
@@ -47,6 +46,7 @@
#include <plat/sdhci.h>
#include <plat/clock.h>
#include <plat/s5p-time.h>
+#include <plat/regs-fb-v4.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define GONI_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index c6a9e86c2d5c..ef20f922249d 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -29,7 +29,6 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/regs-fb.h>
#include <plat/regs-serial.h>
#include <plat/regs-srom.h>
@@ -45,6 +44,8 @@
#include <plat/pm.h>
#include <plat/fb.h>
#include <plat/s5p-time.h>
+#include <plat/backlight.h>
+#include <plat/regs-fb-v4.h>
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
@@ -210,45 +211,6 @@ static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = {
.setup_gpio = s5pv210_fb_gpio_setup_24bpp,
};
-static int smdkv210_backlight_init(struct device *dev)
-{
- int ret;
-
- ret = gpio_request(S5PV210_GPD0(3), "Backlight");
- if (ret) {
- printk(KERN_ERR "failed to request GPD for PWM-OUT 3\n");
- return ret;
- }
-
- /* Configure GPIO pin with S5PV210_GPD_0_3_TOUT_3 */
- s3c_gpio_cfgpin(S5PV210_GPD0(3), S3C_GPIO_SFN(2));
-
- return 0;
-}
-
-static void smdkv210_backlight_exit(struct device *dev)
-{
- s3c_gpio_cfgpin(S5PV210_GPD0(3), S3C_GPIO_OUTPUT);
- gpio_free(S5PV210_GPD0(3));
-}
-
-static struct platform_pwm_backlight_data smdkv210_backlight_data = {
- .pwm_id = 3,
- .max_brightness = 255,
- .dft_brightness = 255,
- .pwm_period_ns = 78770,
- .init = smdkv210_backlight_init,
- .exit = smdkv210_backlight_exit,
-};
-
-static struct platform_device smdkv210_backlight_device = {
- .name = "pwm-backlight",
- .dev = {
- .parent = &s3c_device_timer[3].dev,
- .platform_data = &smdkv210_backlight_data,
- },
-};
-
static struct platform_device *smdkv210_devices[] __initdata = {
&s3c_device_adc,
&s3c_device_cfcon,
@@ -270,8 +232,6 @@ static struct platform_device *smdkv210_devices[] __initdata = {
&samsung_device_keypad,
&smdkv210_dm9000,
&smdkv210_lcd_lte480wv,
- &s3c_device_timer[3],
- &smdkv210_backlight_device,
};
static void __init smdkv210_dm9000_init(void)
@@ -310,6 +270,16 @@ static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = {
.oversampling_shift = 2,
};
+/* LCD Backlight data */
+static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
+ .no = S5PV210_GPD0(3),
+ .func = S3C_GPIO_SFN(2),
+};
+
+static struct platform_pwm_backlight_data smdkv210_bl_data = {
+ .pwm_id = 3,
+};
+
static void __init smdkv210_map_io(void)
{
s5p_init_io(NULL, 0, S5P_VA_CHIPID);
@@ -341,6 +311,8 @@ static void __init smdkv210_machine_init(void)
s3c_fb_set_platdata(&smdkv210_lcd0_pdata);
+ samsung_bl_set(&smdkv210_bl_gpio_info, &smdkv210_bl_data);
+
platform_add_devices(smdkv210_devices, ARRAY_SIZE(smdkv210_devices));
}
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index 24febae3d4c0..309e388a8a83 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
SAVE_ITEM(S3C2410_TCNTO(0)),
};
-void s5pv210_cpu_suspend(void)
+void s5pv210_cpu_suspend(unsigned long arg)
{
unsigned long tmp;
diff --git a/arch/arm/mach-s5pv210/setup-fb-24bpp.c b/arch/arm/mach-s5pv210/setup-fb-24bpp.c
index e932ebfac56d..55103c8220b3 100644
--- a/arch/arm/mach-s5pv210/setup-fb-24bpp.c
+++ b/arch/arm/mach-s5pv210/setup-fb-24bpp.c
@@ -15,7 +15,6 @@
#include <linux/fb.h>
#include <linux/gpio.h>
-#include <mach/regs-fb.h>
#include <mach/map.h>
#include <plat/fb.h>
#include <mach/regs-clock.h>
diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S
index a3d649466fb1..e3452ccd4b08 100644
--- a/arch/arm/mach-s5pv210/sleep.S
+++ b/arch/arm/mach-s5pv210/sleep.S
@@ -32,27 +32,6 @@
.text
- /* s3c_cpu_save
- *
- * entry:
- * r1 = v:p offset
- */
-
-ENTRY(s3c_cpu_save)
-
- stmfd sp!, { r3 - r12, lr }
- ldr r3, =resume_with_mmu
- bl cpu_suspend
-
- ldr r0, =pm_cpu_sleep
- ldr r0, [ r0 ]
- mov pc, r0
-
-resume_with_mmu:
- ldmfd sp!, { r3 - r12, pc }
-
- .ltorg
-
/* sleep magic, to allow the bootloader to check for a valid
* image to resume to. Must be the first word before the
* s3c_cpu_resume entry.
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 5778274a8260..26257df19b63 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -453,4 +453,7 @@ MACHINE_START(ASSABET, "Intel-Assabet")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = assabet_init,
+#ifdef CONFIG_SA1111
+ .dma_zone_size = SZ_1M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c
index 4f19ff868b00..b4311b0a4395 100644
--- a/arch/arm/mach-sa1100/badge4.c
+++ b/arch/arm/mach-sa1100/badge4.c
@@ -306,4 +306,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
.map_io = badge4_map_io,
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
+#ifdef CONFIG_SA1111
+ .dma_zone_size = SZ_1M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h
index cff31ee246b7..12d376795abc 100644
--- a/arch/arm/mach-sa1100/include/mach/memory.h
+++ b/arch/arm/mach-sa1100/include/mach/memory.h
@@ -14,10 +14,6 @@
*/
#define PLAT_PHYS_OFFSET UL(0xc0000000)
-#ifdef CONFIG_SA1111
-#define ARM_DMA_ZONE_SIZE SZ_1M
-#endif
-
/*
* Because of the wide memory address space between physical RAM banks on the
 * SA1100, it's much more convenient to use Linux's SparseMEM support to implement
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index 491ac9f20fb4..176c066aec7e 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -369,4 +369,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = jornada720_mach_init,
+#ifdef CONFIG_SA1111
+ .dma_zone_size = SZ_1M,
+#endif
MACHINE_END
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index c4661aab22fb..bf85b8b259d5 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -29,10 +29,11 @@
#include <mach/hardware.h>
#include <asm/memory.h>
+#include <asm/suspend.h>
#include <asm/system.h>
#include <asm/mach/time.h>
-extern void sa1100_cpu_suspend(long);
+extern int sa1100_finish_suspend(unsigned long);
#define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x]
@@ -75,9 +76,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
PSPR = virt_to_phys(cpu_resume);
/* go zzz */
- sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);
-
- cpu_init();
+ cpu_suspend(0, sa1100_finish_suspend);
/*
* Ensure not to come back here if it wasn't intended
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S
index 04f2a618d4ef..e8223315b442 100644
--- a/arch/arm/mach-sa1100/sleep.S
+++ b/arch/arm/mach-sa1100/sleep.S
@@ -22,18 +22,13 @@
.text
/*
- * sa1100_cpu_suspend()
+ * sa1100_finish_suspend()
*
* Causes sa11x0 to enter sleep state
*
*/
-ENTRY(sa1100_cpu_suspend)
- stmfd sp!, {r4 - r12, lr} @ save registers on stack
- mov r1, r0
- ldr r3, =sa1100_cpu_resume @ return function
- bl cpu_suspend
-
+ENTRY(sa1100_finish_suspend)
@ disable clock switching
mcr p15, 0, r1, c15, c2, 2
@@ -139,13 +134,3 @@ sa1110_sdram_controller_fix:
str r13, [r12]
20: b 20b @ loop waiting for sleep
-
-/*
- * cpu_sa1100_resume()
- *
- * entry point from bootloader into kernel during resume
- */
- .align 5
-sa1100_cpu_resume:
- mcr p15, 0, r1, c15, c1, 2 @ enable clock switching
- ldmfd sp!, {r4 - r12, pc} @ return to caller
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index 5cf7f94c1f31..ac2873c8014b 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -156,4 +156,5 @@ MACHINE_START(SHARK, "Shark")
.map_io = shark_map_io,
.init_irq = shark_init_irq,
.timer = &shark_timer,
+ .dma_zone_size = SZ_4M,
MACHINE_END
diff --git a/arch/arm/mach-shark/include/mach/entry-macro.S b/arch/arm/mach-shark/include/mach/entry-macro.S
index e2853c0a3333..0bb6cc626eb7 100644
--- a/arch/arm/mach-shark/include/mach/entry-macro.S
+++ b/arch/arm/mach-shark/include/mach/entry-macro.S
@@ -11,17 +11,17 @@
.endm
.macro get_irqnr_preamble, base, tmp
+ mov \base, #0xe0000000
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- mov r4, #0xe0000000
mov \irqstat, #0x0C
- strb \irqstat, [r4, #0x20] @outb(0x0C, 0x20) /* Poll command */
- ldrb \irqnr, [r4, #0x20] @irq = inb(0x20) & 7
+ strb \irqstat, [\base, #0x20] @outb(0x0C, 0x20) /* Poll command */
+ ldrb \irqnr, [\base, #0x20] @irq = inb(0x20) & 7
and \irqstat, \irqnr, #0x80
teq \irqstat, #0
beq 43f
@@ -29,8 +29,8 @@
teq \irqnr, #2
bne 44f
43: mov \irqstat, #0x0C
- strb \irqstat, [r4, #0xa0] @outb(0x0C, 0xA0) /* Poll command */
- ldrb \irqnr, [r4, #0xa0] @irq = (inb(0xA0) & 7) + 8
+ strb \irqstat, [\base, #0xa0] @outb(0x0C, 0xA0) /* Poll command */
+ ldrb \irqnr, [\base, #0xa0] @irq = (inb(0xA0) & 7) + 8
and \irqstat, \irqnr, #0x80
teq \irqstat, #0
beq 44f
diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h
index 4c0831f83b0c..1cf8d6962617 100644
--- a/arch/arm/mach-shark/include/mach/memory.h
+++ b/arch/arm/mach-shark/include/mach/memory.h
@@ -17,8 +17,6 @@
*/
#define PLAT_PHYS_OFFSET UL(0x08000000)
-#define ARM_DMA_ZONE_SIZE SZ_4M
-
/*
* Cache flushing area
*/
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h
new file mode 100644
index 000000000000..4a81b01f1e8f
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h
@@ -0,0 +1,21 @@
+#ifndef SDHI_SH7372_H
+#define SDHI_SH7372_H
+
+#define SDGENCNTA 0xfe40009c
+
+/* The countdown of SDGENCNTA is controlled by
+ * ZB3D2CLK which runs at 149.5MHz.
+ * That is 149.5 ticks/us. Approximate this as 150 ticks/us.
+ */
+static void udelay(int us)
+{
+ __raw_writel(us * 150, SDGENCNTA);
+ while(__raw_readl(SDGENCNTA)) ;
+}
+
+static void msleep(int ms)
+{
+ udelay(ms * 1000);
+}
+
+#endif
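
The decompressor cannot rely on the kernel's calibrated delay loops, so the header above busy-waits on the SDGENCNTA down-counter instead, loading it with roughly 150 ticks per microsecond (the ~149.5 MHz ZB3D2CLK rate rounded up, as its comment notes). A minimal stand-alone C sketch of that tick conversion, with hypothetical names, follows:

/*
 * Illustrative sketch, not kernel code: the tick arithmetic behind the
 * udelay() in the header above.  SDGENCNTA counts down at the ZB3D2CLK
 * rate, ~149.5 MHz, which the patch rounds up to 150 ticks per microsecond.
 * The names below are hypothetical.
 */
#include <stdio.h>

#define TICKS_PER_US 150u	/* 149.5 MHz approximated as 150 ticks/us */

static unsigned int us_to_sdgencnta_ticks(unsigned int us)
{
	return us * TICKS_PER_US;
}

int main(void)
{
	/* a 1 ms delay would load 150000 into the down-counter */
	printf("%u\n", us_to_sdgencnta_ticks(1000));
	return 0;
}
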
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi.h b/arch/arm/mach-shmobile/include/mach/sdhi.h
new file mode 100644
index 000000000000..0ec9e69f2c3b
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/sdhi.h
@@ -0,0 +1,16 @@
+#ifndef SDHI_H
+#define SDHI_H
+
+/**************************************************
+ *
+ * CPU specific settings
+ *
+ **************************************************/
+
+#ifdef CONFIG_ARCH_SH7372
+#include "mach/sdhi-sh7372.h"
+#else
+#error "unsupported CPU."
+#endif
+
+#endif /* SDHI_H */
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index f3888feb1c68..66f980625a33 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -64,10 +64,5 @@ void __init smp_init_cpus(void)
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
-
shmobile_smp_prepare_cpus();
}
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
index a8d7ace9f958..10fbbdc8699a 100644
--- a/arch/arm/mach-tegra/board-seaboard.c
+++ b/arch/arm/mach-tegra/board-seaboard.c
@@ -159,7 +159,7 @@ static void __init seaboard_i2c_init(void)
i2c_register_board_info(0, &isl29018_device, 1);
- i2c_register_board_info(4, &adt7461_device, 1);
+ i2c_register_board_info(3, &adt7461_device, 1);
tegra_i2c_device1.dev.platform_data = &seaboard_i2c1_platform_data;
tegra_i2c_device2.dev.platform_data = &seaboard_i2c2_platform_data;
diff --git a/arch/arm/mach-tegra/board-trimslice-pinmux.c b/arch/arm/mach-tegra/board-trimslice-pinmux.c
index 13534fa08abf..d9dc5d297edd 100644
--- a/arch/arm/mach-tegra/board-trimslice-pinmux.c
+++ b/arch/arm/mach-tegra/board-trimslice-pinmux.c
@@ -35,7 +35,7 @@ static __initdata struct tegra_pingroup_config trimslice_pinmux[] = {
{TEGRA_PINGROUP_CSUS, TEGRA_MUX_VI_SENSOR_CLK, TEGRA_PUPD_PULL_DOWN, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DAP1, TEGRA_MUX_DAP1, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DAP2, TEGRA_MUX_DAP2, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
- {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_NORMAL},
+ {TEGRA_PINGROUP_DAP3, TEGRA_MUX_DAP3, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DAP4, TEGRA_MUX_DAP4, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
{TEGRA_PINGROUP_DDC, TEGRA_MUX_I2C2, TEGRA_PUPD_PULL_UP, TEGRA_TRI_NORMAL},
{TEGRA_PINGROUP_DTA, TEGRA_MUX_VI, TEGRA_PUPD_NORMAL, TEGRA_TRI_TRISTATE},
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index b8ae3c978dee..1a594dce8fbc 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -129,14 +129,6 @@ void __init smp_init_cpus(void)
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
scu_enable(scu_base);
}
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 0c527fe2cebb..a33df5f4c27a 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -172,14 +172,6 @@ void __init smp_init_cpus(void)
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
scu_enable(scu_base_addr());
wakeup_secondary();
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 765a71ff7f3b..bfd32f52c2db 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -229,10 +229,6 @@ static void ct_ca9x4_init_cpu_map(void)
static void ct_ca9x4_smp_enable(unsigned int max_cpus)
{
- int i;
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
-
scu_enable(MMIO_P2V(A9_MPCORE_SCU));
}
#endif
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 4f18f9e87bae..54473cd4aba9 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -3,14 +3,11 @@
/*
* Function: v4_early_abort
*
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -21,10 +18,8 @@
ENTRY(v4_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
- ldr r3, [r2] @ read aborted ARM instruction
+ ldr r3, [r4] @ read aborted ARM instruction
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
- mov pc, lr
-
-
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S
index b6282548f922..9da704e7b86e 100644
--- a/arch/arm/mm/abort-ev4t.S
+++ b/arch/arm/mm/abort-ev4t.S
@@ -4,14 +4,11 @@
/*
* Function: v4t_early_abort
*
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -22,9 +19,9 @@
ENTRY(v4t_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
- do_thumb_abort
- ldreq r3, [r2] @ read aborted ARM instruction
+ do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+ ldreq r3, [r4] @ read aborted ARM instruction
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ check write
orreq r1, r1, #1 << 11
- mov pc, lr
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index 02251b526c0d..a0908d4653a3 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -4,14 +4,11 @@
/*
* Function: v5t_early_abort
*
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -22,10 +19,10 @@
ENTRY(v5t_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
- do_thumb_abort
- ldreq r3, [r2] @ read aborted ARM instruction
+ do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+ ldreq r3, [r4] @ read aborted ARM instruction
bic r1, r1, #1 << 11 @ clear bits 11 of FSR
- do_ldrd_abort
+ do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ check write
orreq r1, r1, #1 << 11
- mov pc, lr
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index bce68d601c8b..4006b7a61264 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -4,14 +4,11 @@
/*
* Function: v5tj_early_abort
*
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -23,13 +20,11 @@ ENTRY(v5tj_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
- tst r3, #PSR_J_BIT @ Java?
- movne pc, lr
- do_thumb_abort
- ldreq r3, [r2] @ read aborted ARM instruction
- do_ldrd_abort
+ tst r5, #PSR_J_BIT @ Java?
+ bne do_DataAbort
+ do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+ ldreq r3, [r4] @ read aborted ARM instruction
+ do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
- mov pc, lr
-
-
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 1478aa522144..ff1f7cc11f87 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -4,14 +4,11 @@
/*
* Function: v6_early_abort
*
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -33,16 +30,14 @@ ENTRY(v6_early_abort)
* The test below covers all the write situations, including Java bytecodes
*/
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
- tst r3, #PSR_J_BIT @ Java?
- movne pc, lr
- do_thumb_abort
- ldreq r3, [r2] @ read aborted ARM instruction
+ tst r5, #PSR_J_BIT @ Java?
+ bne do_DataAbort
+ do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+ ldreq r3, [r4] @ read aborted ARM instruction
#ifdef CONFIG_CPU_ENDIAN_BE8
reveq r3, r3
#endif
- do_ldrd_abort
+ do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
- mov pc, lr
-
-
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index ec88b157d3bb..703375277ba6 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -3,14 +3,11 @@
/*
* Function: v7_early_abort
*
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
*/
@@ -37,18 +34,18 @@ ENTRY(v7_early_abort)
ldr r3, =0x40d @ On permission fault
and r3, r1, r3
cmp r3, #0x0d
- movne pc, lr
+ bne do_DataAbort
mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
isb
- mrc p15, 0, r2, c7, c4, 0 @ Read the PAR
- and r3, r2, #0x7b @ On translation fault
+ mrc p15, 0, ip, c7, c4, 0 @ Read the PAR
+ and r3, ip, #0x7b @ On translation fault
cmp r3, #0x0b
- movne pc, lr
+ bne do_DataAbort
bic r1, r1, #0xf @ Fix up FSR FS[5:0]
- and r2, r2, #0x7e
- orr r1, r1, r2, LSR #1
+ and ip, ip, #0x7e
+ orr r1, r1, ip, LSR #1
#endif
- mov pc, lr
+ b do_DataAbort
ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 9fb7b0e25ea1..f3982580c273 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -3,14 +3,11 @@
/*
* Function: v4t_late_abort
*
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4-r5, r10-r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -18,7 +15,7 @@
* picture. Unfortunately, this does happen. We live with it.
*/
ENTRY(v4t_late_abort)
- tst r3, #PSR_T_BIT @ check for thumb mode
+ tst r5, #PSR_T_BIT @ check for thumb mode
#ifdef CONFIG_CPU_CP15_MMU
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
@@ -28,7 +25,7 @@ ENTRY(v4t_late_abort)
mov r1, #0
#endif
bne .data_thumb_abort
- ldr r8, [r2] @ read arm instruction
+ ldr r8, [r4] @ read arm instruction
tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
@@ -47,86 +44,84 @@ ENTRY(v4t_late_abort)
/* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist>
/* a */ b .data_unknown
/* b */ b .data_unknown
-/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
-/* d */ mov pc, lr @ ldc rd, [rn, #m]
+/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
+/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
/* f */
.data_unknown: @ Part of jumptable
- mov r0, r2
+ mov r0, r4
mov r1, r8
- mov r2, sp
- bl baddataabort
- b ret_from_exception
+ b baddataabort
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
- moveq pc, lr @ no writeback -> no fixup
+ beq do_DataAbort @ no writeback -> no fixup
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
- and r2, r8, r7, lsl #1
- add r6, r6, r2, lsr #1
- and r2, r8, r7, lsl #2
- add r6, r6, r2, lsr #2
- and r2, r8, r7, lsl #3
- add r6, r6, r2, lsr #3
+ and r9, r8, r7, lsl #1
+ add r6, r6, r9, lsr #1
+ and r9, r8, r7, lsl #2
+ add r6, r6, r9, lsr #2
+ and r9, r8, r7, lsl #3
+ add r6, r6, r9, lsr #3
add r6, r6, r6, lsr #8
add r6, r6, r6, lsr #4
and r6, r6, #15 @ r6 = no. of registers to transfer.
- and r5, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
+ and r9, r8, #15 << 16 @ Extract 'n' from instruction
+ ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
- str r7, [sp, r5, lsr #14] @ Put register 'Rn'
- mov pc, lr
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ b do_DataAbort
.data_arm_lateldrhpre:
tst r8, #1 << 21 @ Check writeback bit
- moveq pc, lr @ No writeback -> no fixup
+ beq do_DataAbort @ No writeback -> no fixup
.data_arm_lateldrhpost:
- and r5, r8, #0x00f @ get Rm / low nibble of immediate value
+ and r9, r8, #0x00f @ get Rm / low nibble of immediate value
tst r8, #1 << 22 @ if (immediate offset)
andne r6, r8, #0xf00 @ { immediate high nibble
- orrne r6, r5, r6, lsr #4 @ combine nibbles } else
- ldreq r6, [sp, r5, lsl #2] @ { load Rm value }
+ orrne r6, r9, r6, lsr #4 @ combine nibbles } else
+ ldreq r6, [r2, r9, lsl #2] @ { load Rm value }
.data_arm_apply_r6_and_rn:
- and r5, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
+ and r9, r8, #15 << 16 @ Extract 'n' from instruction
+ ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
+ subne r7, r7, r6 @ Undo increment
addeq r7, r7, r6 @ Undo decrement
- str r7, [sp, r5, lsr #14] @ Put register 'Rn'
- mov pc, lr
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ b do_DataAbort
.data_arm_lateldrpreconst:
tst r8, #1 << 21 @ check writeback bit
- moveq pc, lr @ no writeback -> no fixup
+ beq do_DataAbort @ no writeback -> no fixup
.data_arm_lateldrpostconst:
- movs r2, r8, lsl #20 @ Get offset
- moveq pc, lr @ zero -> no fixup
- and r5, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
+ movs r6, r8, lsl #20 @ Get offset
+ beq do_DataAbort @ zero -> no fixup
+ and r9, r8, #15 << 16 @ Extract 'n' from instruction
+ ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
- subne r7, r7, r2, lsr #20 @ Undo increment
- addeq r7, r7, r2, lsr #20 @ Undo decrement
- str r7, [sp, r5, lsr #14] @ Put register 'Rn'
- mov pc, lr
+ subne r7, r7, r6, lsr #20 @ Undo increment
+ addeq r7, r7, r6, lsr #20 @ Undo decrement
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ b do_DataAbort
.data_arm_lateldrprereg:
tst r8, #1 << 21 @ check writeback bit
- moveq pc, lr @ no writeback -> no fixup
+ beq do_DataAbort @ no writeback -> no fixup
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
- ldr r6, [sp, r7, lsl #2] @ Get register 'Rm'
- mov r5, r8, lsr #7 @ get shift count
- ands r5, r5, #31
+ ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
+ mov r9, r8, lsr #7 @ get shift count
+ ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
orreq r7, r7, #8 @ shift count = 0
add pc, pc, r7
nop
- mov r6, r6, lsl r5 @ 0: LSL #!0
+ mov r6, r6, lsl r9 @ 0: LSL #!0
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
@@ -134,7 +129,7 @@ ENTRY(v4t_late_abort)
nop
b .data_unknown @ 3: MUL?
nop
- mov r6, r6, lsr r5 @ 4: LSR #!0
+ mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
@@ -142,7 +137,7 @@ ENTRY(v4t_late_abort)
nop
b .data_unknown @ 7: MUL?
nop
- mov r6, r6, asr r5 @ 8: ASR #!0
+ mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
@@ -150,7 +145,7 @@ ENTRY(v4t_late_abort)
nop
b .data_unknown @ B: MUL?
nop
- mov r6, r6, ror r5 @ C: ROR #!0
+ mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
@@ -159,7 +154,7 @@ ENTRY(v4t_late_abort)
b .data_unknown @ F: MUL?
.data_thumb_abort:
- ldrh r8, [r2] @ read instruction
+ ldrh r8, [r4] @ read instruction
tst r8, #1 << 11 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes
and r7, r8, #15 << 12
@@ -172,10 +167,10 @@ ENTRY(v4t_late_abort)
/* 3 */ b .data_unknown
/* 4 */ b .data_unknown
/* 5 */ b .data_thumb_reg
-/* 6 */ mov pc, lr
-/* 7 */ mov pc, lr
-/* 8 */ mov pc, lr
-/* 9 */ mov pc, lr
+/* 6 */ b do_DataAbort
+/* 7 */ b do_DataAbort
+/* 8 */ b do_DataAbort
+/* 9 */ b do_DataAbort
/* A */ b .data_unknown
/* B */ b .data_thumb_pushpop
/* C */ b .data_thumb_ldmstm
@@ -185,41 +180,41 @@ ENTRY(v4t_late_abort)
.data_thumb_reg:
tst r8, #1 << 9
- moveq pc, lr
+ beq do_DataAbort
tst r8, #1 << 10 @ If 'S' (signed) bit is set
movne r1, #0 @ it must be a load instr
- mov pc, lr
+ b do_DataAbort
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
and r6, r8, #0x55 @ hweight8(r8) + R bit
- and r2, r8, #0xaa
- add r6, r6, r2, lsr #1
- and r2, r6, #0xcc
+ and r9, r8, #0xaa
+ add r6, r6, r9, lsr #1
+ and r9, r6, #0xcc
and r6, r6, #0x33
- add r6, r6, r2, lsr #2
+ add r6, r6, r9, lsr #2
movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit)
adc r6, r6, r6, lsr #4 @ high + low nibble + R bit
and r6, r6, #15 @ number of regs to transfer
- ldr r7, [sp, #13 << 2]
+ ldr r7, [r2, #13 << 2]
tst r8, #1 << 11
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
- str r7, [sp, #13 << 2]
- mov pc, lr
+ str r7, [r2, #13 << 2]
+ b do_DataAbort
.data_thumb_ldmstm:
and r6, r8, #0x55 @ hweight8(r8)
- and r2, r8, #0xaa
- add r6, r6, r2, lsr #1
- and r2, r6, #0xcc
+ and r9, r8, #0xaa
+ add r6, r6, r9, lsr #1
+ and r9, r6, #0xcc
and r6, r6, #0x33
- add r6, r6, r2, lsr #2
+ add r6, r6, r9, lsr #2
add r6, r6, r6, lsr #4
- and r5, r8, #7 << 8
- ldr r7, [sp, r5, lsr #6]
+ and r9, r8, #7 << 8
+ ldr r7, [r2, r9, lsr #6]
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
- str r7, [sp, r5, lsr #6]
- mov pc, lr
+ str r7, [r2, r9, lsr #6]
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index d7cb1bfa51a4..52162d59407a 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -9,34 +9,32 @@
*
*/
- .macro do_thumb_abort
- tst r3, #PSR_T_BIT
+ .macro do_thumb_abort, fsr, pc, psr, tmp
+ tst \psr, #PSR_T_BIT
beq not_thumb
- ldrh r3, [r2] @ Read aborted Thumb instruction
- and r3, r3, # 0xfe00 @ Mask opcode field
- cmp r3, # 0x5600 @ Is it ldrsb?
- orreq r3, r3, #1 << 11 @ Set L-bit if yes
- tst r3, #1 << 11 @ L = 0 -> write
- orreq r1, r1, #1 << 11 @ yes.
- mov pc, lr
+ ldrh \tmp, [\pc] @ Read aborted Thumb instruction
+ and \tmp, \tmp, # 0xfe00 @ Mask opcode field
+ cmp \tmp, # 0x5600 @ Is it ldrsb?
+ orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
+ tst \tmp, #1 << 11 @ L = 0 -> write
+ orreq \psr, \psr, #1 << 11 @ yes.
+ b do_DataAbort
not_thumb:
.endm
/*
- * We check for the following insturction encoding for LDRD.
+ * We check for the following instruction encoding for LDRD.
*
- * [27:25] == 0
+ * [27:25] == 000
* [7:4] == 1101
* [20] == 0
*/
- .macro do_ldrd_abort
- tst r3, #0x0e000000 @ [27:25] == 0
+ .macro do_ldrd_abort, tmp, insn
+ tst \insn, #0x0e100000 @ [27:25,20] == 0
bne not_ldrd
- and r2, r3, #0x000000f0 @ [7:4] == 1101
- cmp r2, #0x000000d0
- bne not_ldrd
- tst r3, #1 << 20 @ [20] == 0
- moveq pc, lr
+ and \tmp, \insn, #0x000000f0 @ [7:4] == 1101
+ cmp \tmp, #0x000000d0
+ beq do_DataAbort
not_ldrd:
.endm
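
The reworked do_ldrd_abort macro takes the trap registers as parameters and folds the L-bit test into its first mask: an instruction is treated as LDRD when bits [27:25] and bit [20] are all zero and bits [7:4] are 1101. Folding [27:25] and [20] into one tst saves a step compared with the old three-part test. A small C restatement of that encoding check (illustrative only, not kernel code):

/*
 * Illustrative sketch, not kernel code: the LDRD encoding test performed
 * by do_ldrd_abort above, restated in C.
 */
#include <stdbool.h>
#include <stdint.h>

static bool insn_is_ldrd(uint32_t insn)
{
	/* bits [27:25] must be 000 and bit [20] (the L bit) must be 0 */
	if (insn & 0x0e100000)
		return false;
	/* bits [7:4] must be 1101 */
	return (insn & 0x000000f0) == 0x000000d0;
}

int main(void)
{
	/* 0xe1c200d0 has [27:25]=000, [20]=0 and [7:4]=1101, so it matches */
	return insn_is_ldrd(0xe1c200d0) ? 0 : 1;
}
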
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S
index 625e580945b5..119cb479c2ab 100644
--- a/arch/arm/mm/abort-nommu.S
+++ b/arch/arm/mm/abort-nommu.S
@@ -3,11 +3,11 @@
/*
* Function: nommu_early_abort
*
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
- * Returns : r0 = 0 (abort address)
- * : r1 = 0 (FSR)
+ * Returns : r4 - r11, r13 preserved
*
* Note: There is no FSR/FAR on !CPU_CP15_MMU cores.
* Just fill zero into the registers.
@@ -16,5 +16,5 @@
ENTRY(nommu_early_abort)
mov r0, #0 @ clear r0, r1 (no FSR/FAR)
mov r1, #0
- mov pc, lr
+ b do_DataAbort
ENDPROC(nommu_early_abort)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 724ba3bce72c..be7c638b648b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -727,6 +727,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
int isize = 4;
int thumb2_32b = 0;
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
instrptr = instruction_pointer(regs);
fs = get_fs();
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 1fa6f71470de..072016371093 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -242,16 +242,5 @@ ENDPROC(fa_dma_unmap_area)
__INITDATA
- .type fa_cache_fns, #object
-ENTRY(fa_cache_fns)
- .long fa_flush_icache_all
- .long fa_flush_kern_cache_all
- .long fa_flush_user_cache_all
- .long fa_flush_user_cache_range
- .long fa_coherent_kern_range
- .long fa_coherent_user_range
- .long fa_flush_kern_dcache_area
- .long fa_dma_map_area
- .long fa_dma_unmap_area
- .long fa_dma_flush_range
- .size fa_cache_fns, . - fa_cache_fns
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions fa
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 2e2bc406a18d..c2301f226100 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -129,16 +129,5 @@ ENDPROC(v3_dma_map_area)
__INITDATA
- .type v3_cache_fns, #object
-ENTRY(v3_cache_fns)
- .long v3_flush_icache_all
- .long v3_flush_kern_cache_all
- .long v3_flush_user_cache_all
- .long v3_flush_user_cache_range
- .long v3_coherent_kern_range
- .long v3_coherent_user_range
- .long v3_flush_kern_dcache_area
- .long v3_dma_map_area
- .long v3_dma_unmap_area
- .long v3_dma_flush_range
- .size v3_cache_fns, . - v3_cache_fns
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v3
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index a8fefb523f19..fd9bb7addc8d 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -141,16 +141,5 @@ ENDPROC(v4_dma_map_area)
__INITDATA
- .type v4_cache_fns, #object
-ENTRY(v4_cache_fns)
- .long v4_flush_icache_all
- .long v4_flush_kern_cache_all
- .long v4_flush_user_cache_all
- .long v4_flush_user_cache_range
- .long v4_coherent_kern_range
- .long v4_coherent_user_range
- .long v4_flush_kern_dcache_area
- .long v4_dma_map_area
- .long v4_dma_unmap_area
- .long v4_dma_flush_range
- .size v4_cache_fns, . - v4_cache_fns
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v4
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index f40c69656d8d..4f2c14151ccb 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -253,16 +253,5 @@ ENDPROC(v4wb_dma_unmap_area)
__INITDATA
- .type v4wb_cache_fns, #object
-ENTRY(v4wb_cache_fns)
- .long v4wb_flush_icache_all
- .long v4wb_flush_kern_cache_all
- .long v4wb_flush_user_cache_all
- .long v4wb_flush_user_cache_range
- .long v4wb_coherent_kern_range
- .long v4wb_coherent_user_range
- .long v4wb_flush_kern_dcache_area
- .long v4wb_dma_map_area
- .long v4wb_dma_unmap_area
- .long v4wb_dma_flush_range
- .size v4wb_cache_fns, . - v4wb_cache_fns
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v4wb
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index a7b276dbda11..4d7b467631ce 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -197,16 +197,5 @@ ENDPROC(v4wt_dma_map_area)
__INITDATA
- .type v4wt_cache_fns, #object
-ENTRY(v4wt_cache_fns)
- .long v4wt_flush_icache_all
- .long v4wt_flush_kern_cache_all
- .long v4wt_flush_user_cache_all
- .long v4wt_flush_user_cache_range
- .long v4wt_coherent_kern_range
- .long v4wt_coherent_user_range
- .long v4wt_flush_kern_dcache_area
- .long v4wt_dma_map_area
- .long v4wt_dma_unmap_area
- .long v4wt_dma_flush_range
- .size v4wt_cache_fns, . - v4wt_cache_fns
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v4wt
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 73b4a8b66a57..74c2e5a33a4d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -330,16 +330,5 @@ ENDPROC(v6_dma_unmap_area)
__INITDATA
- .type v6_cache_fns, #object
-ENTRY(v6_cache_fns)
- .long v6_flush_icache_all
- .long v6_flush_kern_cache_all
- .long v6_flush_user_cache_all
- .long v6_flush_user_cache_range
- .long v6_coherent_kern_range
- .long v6_coherent_user_range
- .long v6_flush_kern_dcache_area
- .long v6_dma_map_area
- .long v6_dma_unmap_area
- .long v6_dma_flush_range
- .size v6_cache_fns, . - v6_cache_fns
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v6
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index d32f02b61866..3b24bfa3b828 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -325,16 +325,5 @@ ENDPROC(v7_dma_unmap_area)
__INITDATA
- .type v7_cache_fns, #object
-ENTRY(v7_cache_fns)
- .long v7_flush_icache_all
- .long v7_flush_kern_cache_all
- .long v7_flush_user_cache_all
- .long v7_flush_user_cache_range
- .long v7_coherent_kern_range
- .long v7_coherent_user_range
- .long v7_flush_kern_dcache_area
- .long v7_dma_map_area
- .long v7_dma_unmap_area
- .long v7_dma_flush_range
- .size v7_cache_fns, . - v7_cache_fns
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v7
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index bdba6c65c901..63cca0097130 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
- __cpuc_flush_dcache_area(kto, PAGE_SIZE);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82a093cee09a..0a0a1e7c20d2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -25,9 +25,11 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
+#include "mm.h"
+
static u64 get_coherent_dma_mask(struct device *dev)
{
- u64 mask = ISA_DMA_THRESHOLD;
+ u64 mask = (u64)arm_dma_limit;
if (dev) {
mask = dev->coherent_dma_mask;
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- if ((~mask) & ISA_DMA_THRESHOLD) {
+ if ((~mask) & (u64)arm_dma_limit) {
dev_warn(dev, "coherent DMA mask %#llx is smaller "
"than system GFP_DMA mask %#llx\n",
- mask, (unsigned long long)ISA_DMA_THRESHOLD);
+ mask, (u64)arm_dma_limit);
return 0;
}
}
@@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+ if (mask < (u64)arm_dma_limit)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+#ifndef CONFIG_DMABOUNCE
+ *dev->dma_mask = dma_mask;
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
static int __init dma_debug_do_init(void)
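
With ISA_DMA_THRESHOLD gone, the coherent mask and the new dma_supported()/dma_set_mask() helpers are checked against arm_dma_limit, the highest bus address a GFP_DMA allocation can return. A device mask narrower than that limit cannot cover the DMA zone and is rejected. The sketch below restates the check outside the kernel; the limit value and all names are invented for illustration:

/*
 * Illustrative sketch, not kernel code: the mask test dma_supported()
 * performs above, with a made-up limit value.
 */
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for arm_dma_limit (highest DMA-able bus address) */
static const uint32_t example_dma_limit = 0x0fffffff;

static int example_dma_supported(uint64_t mask)
{
	/* a mask below the limit cannot address all GFP_DMA memory */
	return mask >= (uint64_t)example_dma_limit;
}

int main(void)
{
	printf("32-bit mask ok: %d\n", example_dma_supported(0xffffffffULL));
	printf("24-bit mask ok: %d\n", example_dma_supported(0x00ffffffULL));
	return 0;
}
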
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9ea4f7ddd665..3b5ea68acbb8 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
pud = pud_offset(pgd, addr);
if (PTRS_PER_PUD != 1)
- printk(", *pud=%08lx", pud_val(*pud));
+ printk(", *pud=%08llx", (long long)pud_val(*pud));
if (pud_none(*pud))
break;
@@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
tsk = current;
mm = tsk->mm;
+ /* Enable interrupts if they were enabled in the parent context. */
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c19571c40a21..2fee782077c1 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -212,6 +212,18 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
}
#ifdef CONFIG_ZONE_DMA
+
+unsigned long arm_dma_zone_size __read_mostly;
+EXPORT_SYMBOL(arm_dma_zone_size);
+
+/*
+ * The DMA mask corresponding to the maximum bus address allocatable
+ * using GFP_DMA. The default here places no restriction on DMA
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+u32 arm_dma_limit;
+
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
{
@@ -267,17 +279,17 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
#endif
}
-#ifdef ARM_DMA_ZONE_SIZE
-#ifndef CONFIG_ZONE_DMA
-#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
-#endif
-
+#ifdef CONFIG_ZONE_DMA
/*
* Adjust the sizes according to any special requirements for
* this machine type.
*/
- arm_adjust_dma_zone(zone_size, zhole_size,
- ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
+ if (arm_dma_zone_size) {
+ arm_adjust_dma_zone(zone_size, zhole_size,
+ arm_dma_zone_size >> PAGE_SHIFT);
+ arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+ } else
+ arm_dma_limit = 0xffffffff;
#endif
free_area_init_node(0, zone_size, min, zhole_size);
@@ -422,6 +434,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
return pages;
}
+/*
+ * Poison init memory with an undefined instruction (ARM) or a branch to an
+ * undefined instruction (Thumb).
+ */
+static inline void poison_init_mem(void *s, size_t count)
+{
+ u32 *p = (u32 *)s;
+ while ((count = count - 4))
+ *p++ = 0xe7fddef0;
+}
+
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
@@ -639,8 +662,8 @@ void __init mem_init(void)
" pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .data : 0x%p" " - 0x%p" " (%4d kB)\n"
" .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
@@ -662,8 +685,8 @@ void __init mem_init(void)
#endif
MLM(MODULES_VADDR, MODULES_END),
- MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_text, _etext),
+ MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_sdata, _edata),
MLK_ROUNDUP(__bss_start, __bss_stop));
@@ -704,11 +727,13 @@ void free_initmem(void)
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
+ poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
__phys_to_pfn(__pa(&__tcm_end)),
"TCM link");
#endif
+ poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())
totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
__phys_to_pfn(__pa(__init_end)),
@@ -721,10 +746,12 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd)
+ if (!keep_initrd) {
+ poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
totalram_pages += free_area(__phys_to_pfn(__pa(start)),
__phys_to_pfn(__pa(end)),
"initrd");
+ }
}
static int __init keepinitrd_setup(char *__unused)
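
free_initmem() and free_initrd_mem() now poison the memory they are about to release with 0xe7fddef0, which the patch comment describes as an undefined instruction in ARM state and a branch to an undefined instruction in Thumb, so a stale pointer into freed init code faults immediately instead of executing leftover bytes. A user-space sketch of the same poisoning idea (not the kernel's loop; the function name is made up):

/*
 * Illustrative sketch, not the kernel implementation: poison a word-aligned
 * region with the pattern the patch writes into freed init memory.
 */
#include <stddef.h>
#include <stdint.h>

static void poison_words(uint32_t *p, size_t bytes)
{
	size_t i;

	/* 0xe7fddef0: undefined instruction (ARM) / branch to an undefined
	 * instruction (Thumb), per the comment added in init.c above */
	for (i = 0; i + sizeof(*p) <= bytes; i += sizeof(*p))
		*p++ = 0xe7fddef0;
}

int main(void)
{
	uint32_t buf[16];

	poison_words(buf, sizeof(buf));
	return 0;
}
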
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 5b3d7d543659..010566799c80 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#endif
+#ifdef CONFIG_ZONE_DMA
+extern u32 arm_dma_limit;
+#else
+#define arm_dma_limit ((u32)~0)
+#endif
+
void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);
diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S
index 87970eba88ea..8bbff025269a 100644
--- a/arch/arm/mm/pabort-legacy.S
+++ b/arch/arm/mm/pabort-legacy.S
@@ -4,16 +4,18 @@
/*
* Function: legacy_pabort
*
- * Params : r0 = address of aborted instruction
+ * Params : r2 = pt_regs
+ * : r4 = address of aborted instruction
+ * : r5 = psr for parent context
*
- * Returns : r0 = address of abort
- * : r1 = Simulated IFSR with section translation fault status
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current prefetch abort.
*/
.align 5
ENTRY(legacy_pabort)
+ mov r0, r4
mov r1, #5
- mov pc, lr
+ b do_PrefetchAbort
ENDPROC(legacy_pabort)
diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S
index 06e3d1ef2115..9627646ce783 100644
--- a/arch/arm/mm/pabort-v6.S
+++ b/arch/arm/mm/pabort-v6.S
@@ -4,16 +4,18 @@
/*
* Function: v6_pabort
*
- * Params : r0 = address of aborted instruction
+ * Params : r2 = pt_regs
+ * : r4 = address of aborted instruction
+ * : r5 = psr for parent context
*
- * Returns : r0 = address of abort
- * : r1 = IFSR
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current prefetch abort.
*/
.align 5
ENTRY(v6_pabort)
+ mov r0, r4
mrc p15, 0, r1, c5, c0, 1 @ get IFSR
- mov pc, lr
+ b do_PrefetchAbort
ENDPROC(v6_pabort)
diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S
index a8b3b300a18d..875761f44f3b 100644
--- a/arch/arm/mm/pabort-v7.S
+++ b/arch/arm/mm/pabort-v7.S
@@ -2,12 +2,13 @@
#include <asm/assembler.h>
/*
- * Function: v6_pabort
+ * Function: v7_pabort
*
- * Params : r0 = address of aborted instruction
+ * Params : r2 = pt_regs
+ * : r4 = address of aborted instruction
+ * : r5 = psr for parent context
*
- * Returns : r0 = address of abort
- * : r1 = IFSR
+ * Returns : r4 - r11, r13 preserved
*
* Purpose : obtain information about current prefetch abort.
*/
@@ -16,5 +17,5 @@
ENTRY(v7_pabort)
mrc p15, 0, r0, c6, c0, 2 @ get IFAR
mrc p15, 0, r1, c5, c0, 1 @ get IFSR
- mov pc, lr
+ b do_PrefetchAbort
ENDPROC(v7_pabort)
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 6c4e7fd6c8af..67469665d47a 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -364,17 +364,8 @@ ENTRY(arm1020_dma_unmap_area)
mov pc, lr
ENDPROC(arm1020_dma_unmap_area)
-ENTRY(arm1020_cache_fns)
- .long arm1020_flush_icache_all
- .long arm1020_flush_kern_cache_all
- .long arm1020_flush_user_cache_all
- .long arm1020_flush_user_cache_range
- .long arm1020_coherent_kern_range
- .long arm1020_coherent_user_range
- .long arm1020_flush_kern_dcache_area
- .long arm1020_dma_map_area
- .long arm1020_dma_unmap_area
- .long arm1020_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm1020
.align 5
ENTRY(cpu_arm1020_dcache_clean_area)
@@ -477,38 +468,14 @@ arm1020_crval:
crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930
__INITDATA
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm1020, dabort=v4t_early_abort, pabort=legacy_pabort
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm1020_processor_functions, #object
-arm1020_processor_functions:
- .word v4t_early_abort
- .word legacy_pabort
- .word cpu_arm1020_proc_init
- .word cpu_arm1020_proc_fin
- .word cpu_arm1020_reset
- .word cpu_arm1020_do_idle
- .word cpu_arm1020_dcache_clean_area
- .word cpu_arm1020_switch_mm
- .word cpu_arm1020_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm1020_processor_functions, . - arm1020_processor_functions
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5t"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
+ string cpu_arch_name, "armv5t"
+ string cpu_elf_name, "v5"
.type cpu_arm1020_name, #object
cpu_arm1020_name:
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4ce947c19623..4251421c0ed5 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -350,17 +350,8 @@ ENTRY(arm1020e_dma_unmap_area)
mov pc, lr
ENDPROC(arm1020e_dma_unmap_area)
-ENTRY(arm1020e_cache_fns)
- .long arm1020e_flush_icache_all
- .long arm1020e_flush_kern_cache_all
- .long arm1020e_flush_user_cache_all
- .long arm1020e_flush_user_cache_range
- .long arm1020e_coherent_kern_range
- .long arm1020e_coherent_user_range
- .long arm1020e_flush_kern_dcache_area
- .long arm1020e_dma_map_area
- .long arm1020e_dma_unmap_area
- .long arm1020e_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm1020e
.align 5
ENTRY(cpu_arm1020e_dcache_clean_area)
@@ -458,43 +449,14 @@ arm1020e_crval:
crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930
__INITDATA
-
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm1020e_processor_functions, #object
-arm1020e_processor_functions:
- .word v4t_early_abort
- .word legacy_pabort
- .word cpu_arm1020e_proc_init
- .word cpu_arm1020e_proc_fin
- .word cpu_arm1020e_reset
- .word cpu_arm1020e_do_idle
- .word cpu_arm1020e_dcache_clean_area
- .word cpu_arm1020e_switch_mm
- .word cpu_arm1020e_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm1020e_processor_functions, . - arm1020e_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm1020e, dabort=v4t_early_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5te"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm1020e_name, #object
-cpu_arm1020e_name:
- .asciz "ARM1020E"
- .size cpu_arm1020e_name, . - cpu_arm1020e_name
+ string cpu_arch_name, "armv5te"
+ string cpu_elf_name, "v5"
+ string cpu_arm1020e_name, "ARM1020E"
.align
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index c8884c5413a2..d283cf3d06e3 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -339,17 +339,8 @@ ENTRY(arm1022_dma_unmap_area)
mov pc, lr
ENDPROC(arm1022_dma_unmap_area)
-ENTRY(arm1022_cache_fns)
- .long arm1022_flush_icache_all
- .long arm1022_flush_kern_cache_all
- .long arm1022_flush_user_cache_all
- .long arm1022_flush_user_cache_range
- .long arm1022_coherent_kern_range
- .long arm1022_coherent_user_range
- .long arm1022_flush_kern_dcache_area
- .long arm1022_dma_map_area
- .long arm1022_dma_unmap_area
- .long arm1022_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm1022
.align 5
ENTRY(cpu_arm1022_dcache_clean_area)
@@ -441,43 +432,14 @@ arm1022_crval:
crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930
__INITDATA
-
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm1022_processor_functions, #object
-arm1022_processor_functions:
- .word v4t_early_abort
- .word legacy_pabort
- .word cpu_arm1022_proc_init
- .word cpu_arm1022_proc_fin
- .word cpu_arm1022_reset
- .word cpu_arm1022_do_idle
- .word cpu_arm1022_dcache_clean_area
- .word cpu_arm1022_switch_mm
- .word cpu_arm1022_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm1022_processor_functions, . - arm1022_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5te"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm1022_name, #object
-cpu_arm1022_name:
- .asciz "ARM1022"
- .size cpu_arm1022_name, . - cpu_arm1022_name
+ string cpu_arch_name, "armv5te"
+ string cpu_elf_name, "v5"
+ string cpu_arm1022_name, "ARM1022"
.align
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 413684660aad..678a1ceafed2 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -333,17 +333,8 @@ ENTRY(arm1026_dma_unmap_area)
mov pc, lr
ENDPROC(arm1026_dma_unmap_area)
-ENTRY(arm1026_cache_fns)
- .long arm1026_flush_icache_all
- .long arm1026_flush_kern_cache_all
- .long arm1026_flush_user_cache_all
- .long arm1026_flush_user_cache_range
- .long arm1026_coherent_kern_range
- .long arm1026_coherent_user_range
- .long arm1026_flush_kern_dcache_area
- .long arm1026_dma_map_area
- .long arm1026_dma_unmap_area
- .long arm1026_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm1026
.align 5
ENTRY(cpu_arm1026_dcache_clean_area)
@@ -436,45 +427,15 @@ arm1026_crval:
crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934
__INITDATA
-
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm1026_processor_functions, #object
-arm1026_processor_functions:
- .word v5t_early_abort
- .word legacy_pabort
- .word cpu_arm1026_proc_init
- .word cpu_arm1026_proc_fin
- .word cpu_arm1026_reset
- .word cpu_arm1026_do_idle
- .word cpu_arm1026_dcache_clean_area
- .word cpu_arm1026_switch_mm
- .word cpu_arm1026_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm1026_processor_functions, . - arm1026_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm1026, dabort=v5t_early_abort, pabort=legacy_pabort
.section .rodata
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5tej"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
+ string cpu_arch_name, "armv5tej"
+ string cpu_elf_name, "v5"
.align
-
- .type cpu_arm1026_name, #object
-cpu_arm1026_name:
- .asciz "ARM1026EJ-S"
- .size cpu_arm1026_name, . - cpu_arm1026_name
-
+ string cpu_arm1026_name, "ARM1026EJ-S"
.align
.section ".proc.info.init", #alloc, #execinstr
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 5f79dc4ce3fb..e5b974cddac3 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -29,19 +29,19 @@ ENTRY(cpu_arm7_dcache_clean_area)
/*
* Function: arm6_7_data_abort ()
*
- * Params : r2 = address of aborted instruction
- * : sp = pointer to registers
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
*
* Purpose : obtain information about current aborted instruction
*
- * Returns : r0 = address of abort
- * : r1 = FSR
+ * Returns : r4-r5, r10-r11, r13 preserved
*/
ENTRY(cpu_arm7_data_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
- ldr r8, [r2] @ read arm instruction
+ ldr r8, [r4] @ read arm instruction
tst r8, #1 << 20 @ L = 0 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
@@ -49,7 +49,7 @@ ENTRY(cpu_arm7_data_abort)
nop
/* 0 */ b .data_unknown
-/* 1 */ mov pc, lr @ swp
+/* 1 */ b do_DataAbort @ swp
/* 2 */ b .data_unknown
/* 3 */ b .data_unknown
/* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m
@@ -60,87 +60,85 @@ ENTRY(cpu_arm7_data_abort)
/* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist>
/* a */ b .data_unknown
/* b */ b .data_unknown
-/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
-/* d */ mov pc, lr @ ldc rd, [rn, #m]
+/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
+/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
/* f */
.data_unknown: @ Part of jumptable
- mov r0, r2
+ mov r0, r4
mov r1, r8
- mov r2, sp
- bl baddataabort
- b ret_from_exception
+ b baddataabort
ENTRY(cpu_arm6_data_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
- ldr r8, [r2] @ read arm instruction
+ ldr r8, [r4] @ read arm instruction
tst r8, #1 << 20 @ L = 0 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #14 << 24
teq r7, #8 << 24 @ was it ldm/stm
- movne pc, lr
+ bne do_DataAbort
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
- moveq pc, lr @ no writeback -> no fixup
+ beq do_DataAbort @ no writeback -> no fixup
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
- and r2, r8, r7, lsl #1
- add r6, r6, r2, lsr #1
- and r2, r8, r7, lsl #2
- add r6, r6, r2, lsr #2
- and r2, r8, r7, lsl #3
- add r6, r6, r2, lsr #3
+ and r9, r8, r7, lsl #1
+ add r6, r6, r9, lsr #1
+ and r9, r8, r7, lsl #2
+ add r6, r6, r9, lsr #2
+ and r9, r8, r7, lsl #3
+ add r6, r6, r9, lsr #3
add r6, r6, r6, lsr #8
add r6, r6, r6, lsr #4
and r6, r6, #15 @ r6 = no. of registers to transfer.
- and r5, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
+ and r9, r8, #15 << 16 @ Extract 'n' from instruction
+ ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
- str r7, [sp, r5, lsr #14] @ Put register 'Rn'
- mov pc, lr
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ b do_DataAbort
.data_arm_apply_r6_and_rn:
- and r5, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
+ and r9, r8, #15 << 16 @ Extract 'n' from instruction
+ ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
 	subne	r7, r7, r6			@ Undo increment
addeq r7, r7, r6 @ Undo decrement
- str r7, [sp, r5, lsr #14] @ Put register 'Rn'
- mov pc, lr
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ b do_DataAbort
.data_arm_lateldrpreconst:
tst r8, #1 << 21 @ check writeback bit
- moveq pc, lr @ no writeback -> no fixup
+ beq do_DataAbort @ no writeback -> no fixup
.data_arm_lateldrpostconst:
- movs r2, r8, lsl #20 @ Get offset
- moveq pc, lr @ zero -> no fixup
- and r5, r8, #15 << 16 @ Extract 'n' from instruction
- ldr r7, [sp, r5, lsr #14] @ Get register 'Rn'
+ movs r6, r8, lsl #20 @ Get offset
+ beq do_DataAbort @ zero -> no fixup
+ and r9, r8, #15 << 16 @ Extract 'n' from instruction
+ ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
- subne r7, r7, r2, lsr #20 @ Undo increment
- addeq r7, r7, r2, lsr #20 @ Undo decrement
- str r7, [sp, r5, lsr #14] @ Put register 'Rn'
- mov pc, lr
+ subne r7, r7, r6, lsr #20 @ Undo increment
+ addeq r7, r7, r6, lsr #20 @ Undo decrement
+ str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ b do_DataAbort
.data_arm_lateldrprereg:
tst r8, #1 << 21 @ check writeback bit
- moveq pc, lr @ no writeback -> no fixup
+ beq do_DataAbort @ no writeback -> no fixup
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
- ldr r6, [sp, r7, lsl #2] @ Get register 'Rm'
- mov r5, r8, lsr #7 @ get shift count
- ands r5, r5, #31
+ ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
+ mov r9, r8, lsr #7 @ get shift count
+ ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
orreq r7, r7, #8 @ shift count = 0
add pc, pc, r7
nop
- mov r6, r6, lsl r5 @ 0: LSL #!0
+ mov r6, r6, lsl r9 @ 0: LSL #!0
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
@@ -148,7 +146,7 @@ ENTRY(cpu_arm6_data_abort)
nop
b .data_unknown @ 3: MUL?
nop
- mov r6, r6, lsr r5 @ 4: LSR #!0
+ mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
@@ -156,7 +154,7 @@ ENTRY(cpu_arm6_data_abort)
nop
b .data_unknown @ 7: MUL?
nop
- mov r6, r6, asr r5 @ 8: ASR #!0
+ mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
@@ -164,7 +162,7 @@ ENTRY(cpu_arm6_data_abort)
nop
b .data_unknown @ B: MUL?
nop
- mov r6, r6, ror r5 @ C: ROR #!0
+ mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
@@ -269,159 +267,57 @@ __arm7_setup: mov r0, #0
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm6_processor_functions, #object
-ENTRY(arm6_processor_functions)
- .word cpu_arm6_data_abort
- .word legacy_pabort
- .word cpu_arm6_proc_init
- .word cpu_arm6_proc_fin
- .word cpu_arm6_reset
- .word cpu_arm6_do_idle
- .word cpu_arm6_dcache_clean_area
- .word cpu_arm6_switch_mm
- .word cpu_arm6_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm6_processor_functions, . - arm6_processor_functions
-
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm7_processor_functions, #object
-ENTRY(arm7_processor_functions)
- .word cpu_arm7_data_abort
- .word legacy_pabort
- .word cpu_arm7_proc_init
- .word cpu_arm7_proc_fin
- .word cpu_arm7_reset
- .word cpu_arm7_do_idle
- .word cpu_arm7_dcache_clean_area
- .word cpu_arm7_switch_mm
- .word cpu_arm7_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm7_processor_functions, . - arm7_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm6, dabort=cpu_arm6_data_abort, pabort=legacy_pabort
+ define_processor_functions arm7, dabort=cpu_arm7_data_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name: .asciz "armv3"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name: .asciz "v3"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm6_name, #object
-cpu_arm6_name: .asciz "ARM6"
- .size cpu_arm6_name, . - cpu_arm6_name
-
- .type cpu_arm610_name, #object
-cpu_arm610_name:
- .asciz "ARM610"
- .size cpu_arm610_name, . - cpu_arm610_name
-
- .type cpu_arm7_name, #object
-cpu_arm7_name: .asciz "ARM7"
- .size cpu_arm7_name, . - cpu_arm7_name
-
- .type cpu_arm710_name, #object
-cpu_arm710_name:
- .asciz "ARM710"
- .size cpu_arm710_name, . - cpu_arm710_name
+ string cpu_arch_name, "armv3"
+ string cpu_elf_name, "v3"
+ string cpu_arm6_name, "ARM6"
+ string cpu_arm610_name, "ARM610"
+ string cpu_arm7_name, "ARM7"
+ string cpu_arm710_name, "ARM710"
.align
.section ".proc.info.init", #alloc, #execinstr
- .type __arm6_proc_info, #object
-__arm6_proc_info:
- .long 0x41560600
- .long 0xfffffff0
- .long 0x00000c1e
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __arm6_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_26BIT
- .long cpu_arm6_name
- .long arm6_processor_functions
- .long v3_tlb_fns
- .long v3_user_fns
- .long v3_cache_fns
- .size __arm6_proc_info, . - __arm6_proc_info
-
- .type __arm610_proc_info, #object
-__arm610_proc_info:
- .long 0x41560610
- .long 0xfffffff0
- .long 0x00000c1e
+.macro arm67_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
+ cpu_mm_mmu_flags:req, cpu_flush:req, cpu_proc_funcs:req
+ .type __\name\()_proc_info, #object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
+ .long \cpu_mm_mmu_flags
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm6_setup
+ b \cpu_flush
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_26BIT
- .long cpu_arm610_name
- .long arm6_processor_functions
+ .long \cpu_name
+ .long \cpu_proc_funcs
.long v3_tlb_fns
.long v3_user_fns
.long v3_cache_fns
- .size __arm610_proc_info, . - __arm610_proc_info
-
- .type __arm7_proc_info, #object
-__arm7_proc_info:
- .long 0x41007000
- .long 0xffffff00
- .long 0x00000c1e
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __arm7_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_26BIT
- .long cpu_arm7_name
- .long arm7_processor_functions
- .long v3_tlb_fns
- .long v3_user_fns
- .long v3_cache_fns
- .size __arm7_proc_info, . - __arm7_proc_info
-
- .type __arm710_proc_info, #object
-__arm710_proc_info:
- .long 0x41007100
- .long 0xfff8ff00
- .long PMD_TYPE_SECT | \
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
+
+ arm67_proc_info arm6, 0x41560600, 0xfffffff0, cpu_arm6_name, \
+ 0x00000c1e, __arm6_setup, arm6_processor_functions
+ arm67_proc_info arm610, 0x41560610, 0xfffffff0, cpu_arm610_name, \
+ 0x00000c1e, __arm6_setup, arm6_processor_functions
+ arm67_proc_info arm7, 0x41007000, 0xffffff00, cpu_arm7_name, \
+ 0x00000c1e, __arm7_setup, arm7_processor_functions
+ arm67_proc_info arm710, 0x41007100, 0xfff8ff00, cpu_arm710_name, \
+ PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __arm7_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_26BIT
- .long cpu_arm710_name
- .long arm7_processor_functions
- .long v3_tlb_fns
- .long v3_user_fns
- .long v3_cache_fns
- .size __arm710_proc_info, . - __arm710_proc_info
+ PMD_SECT_AP_READ, \
+ __arm7_setup, arm7_processor_functions
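
As a sanity check on the new arm67_proc_info macro, the first invocation above should reproduce the __arm6_proc_info record it replaces; a sketch of the assumed expansion:

	.type	__arm6_proc_info, #object
__arm6_proc_info:
	.long	0x41560600			@ cpu_val
	.long	0xfffffff0			@ cpu_mask
	.long	0x00000c1e			@ cpu_mm_mmu_flags
	.long	PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ
	b	__arm6_setup			@ cpu_flush
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_26BIT
	.long	cpu_arm6_name
	.long	arm6_processor_functions
	.long	v3_tlb_fns
	.long	v3_user_fns
	.long	v3_cache_fns
	.size	__arm6_proc_info, . - __arm6_proc_info

Note that the arm710 invocation passes the full PMD_* expression as cpu_mm_mmu_flags, so the same macro covers both the literal 0x00000c1e entries and the section-flag case without special-casing.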
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 7a06e5964f59..55f4e290665a 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -169,46 +169,15 @@ arm720_crval:
crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130
__INITDATA
-
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm720_processor_functions, #object
-ENTRY(arm720_processor_functions)
- .word v4t_late_abort
- .word legacy_pabort
- .word cpu_arm720_proc_init
- .word cpu_arm720_proc_fin
- .word cpu_arm720_reset
- .word cpu_arm720_do_idle
- .word cpu_arm720_dcache_clean_area
- .word cpu_arm720_switch_mm
- .word cpu_arm720_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm720_processor_functions, . - arm720_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm720, dabort=v4t_late_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name: .asciz "armv4t"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name: .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm710_name, #object
-cpu_arm710_name:
- .asciz "ARM710T"
- .size cpu_arm710_name, . - cpu_arm710_name
-
- .type cpu_arm720_name, #object
-cpu_arm720_name:
- .asciz "ARM720T"
- .size cpu_arm720_name, . - cpu_arm720_name
+ string cpu_arch_name, "armv4t"
+ string cpu_elf_name, "v4"
+ string cpu_arm710_name, "ARM710T"
+ string cpu_arm720_name, "ARM720T"
.align
@@ -218,10 +187,11 @@ cpu_arm720_name:
.section ".proc.info.init", #alloc, #execinstr
- .type __arm710_proc_info, #object
-__arm710_proc_info:
- .long 0x41807100 @ cpu_val
- .long 0xffffff00 @ cpu_mask
+.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req
+ .type __\name\()_proc_info,#object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
@@ -232,38 +202,17 @@ __arm710_proc_info:
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __arm710_setup @ cpu_flush
+ b \cpu_flush @ cpu_flush
.long cpu_arch_name @ arch_name
.long cpu_elf_name @ elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap
- .long cpu_arm710_name @ name
+ .long \cpu_name
.long arm720_processor_functions
.long v4_tlb_fns
.long v4wt_user_fns
.long v4_cache_fns
- .size __arm710_proc_info, . - __arm710_proc_info
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
- .type __arm720_proc_info, #object
-__arm720_proc_info:
- .long 0x41807200 @ cpu_val
- .long 0xffffff00 @ cpu_mask
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __arm720_setup @ cpu_flush
- .long cpu_arch_name @ arch_name
- .long cpu_elf_name @ elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap
- .long cpu_arm720_name @ name
- .long arm720_processor_functions
- .long v4_tlb_fns
- .long v4wt_user_fns
- .long v4_cache_fns
- .size __arm720_proc_info, . - __arm720_proc_info
+ arm720_proc_info arm710, 0x41807100, 0xffffff00, cpu_arm710_name, __arm710_setup
+ arm720_proc_info arm720, 0x41807200, 0xffffff00, cpu_arm720_name, __arm720_setup
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 6f9d12effee1..4506be3adda6 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -17,6 +17,8 @@
#include <asm/pgtable.h>
#include <asm/ptrace.h>
+#include "proc-macros.S"
+
.text
/*
* cpu_arm740_proc_init()
@@ -115,42 +117,14 @@ __arm740_setup:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm740_processor_functions, #object
-ENTRY(arm740_processor_functions)
- .word v4t_late_abort
- .word legacy_pabort
- .word cpu_arm740_proc_init
- .word cpu_arm740_proc_fin
- .word cpu_arm740_reset
- .word cpu_arm740_do_idle
- .word cpu_arm740_dcache_clean_area
- .word cpu_arm740_switch_mm
- .word 0 @ cpu_*_set_pte
- .word 0
- .word 0
- .word 0
- .size arm740_processor_functions, . - arm740_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm740, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm740_name, #object
-cpu_arm740_name:
- .ascii "ARM740T"
- .size cpu_arm740_name, . - cpu_arm740_name
+ string cpu_arch_name, "armv4"
+ string cpu_elf_name, "v4"
+ string cpu_arm740_name, "ARM740T"
.align
@@ -170,5 +144,3 @@ __arm740_proc_info:
.long 0
.long v3_cache_fns @ cache model
.size __arm740_proc_info, . - __arm740_proc_info
-
-
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 537ffcb0646d..7e0e1fe4ed4d 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -17,6 +17,8 @@
#include <asm/pgtable.h>
#include <asm/ptrace.h>
+#include "proc-macros.S"
+
.text
/*
* cpu_arm7tdmi_proc_init()
@@ -55,197 +57,57 @@ __arm7tdmi_setup:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm7tdmi_processor_functions, #object
-ENTRY(arm7tdmi_processor_functions)
- .word v4t_late_abort
- .word legacy_pabort
- .word cpu_arm7tdmi_proc_init
- .word cpu_arm7tdmi_proc_fin
- .word cpu_arm7tdmi_reset
- .word cpu_arm7tdmi_do_idle
- .word cpu_arm7tdmi_dcache_clean_area
- .word cpu_arm7tdmi_switch_mm
- .word 0 @ cpu_*_set_pte
- .word 0
- .word 0
- .word 0
- .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm7tdmi, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4t"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm7tdmi_name, #object
-cpu_arm7tdmi_name:
- .asciz "ARM7TDMI"
- .size cpu_arm7tdmi_name, . - cpu_arm7tdmi_name
-
- .type cpu_triscenda7_name, #object
-cpu_triscenda7_name:
- .asciz "Triscend-A7x"
- .size cpu_triscenda7_name, . - cpu_triscenda7_name
-
- .type cpu_at91_name, #object
-cpu_at91_name:
- .asciz "Atmel-AT91M40xxx"
- .size cpu_at91_name, . - cpu_at91_name
-
- .type cpu_s3c3410_name, #object
-cpu_s3c3410_name:
- .asciz "Samsung-S3C3410"
- .size cpu_s3c3410_name, . - cpu_s3c3410_name
-
- .type cpu_s3c44b0x_name, #object
-cpu_s3c44b0x_name:
- .asciz "Samsung-S3C44B0x"
- .size cpu_s3c44b0x_name, . - cpu_s3c44b0x_name
-
- .type cpu_s3c4510b, #object
-cpu_s3c4510b_name:
- .asciz "Samsung-S3C4510B"
- .size cpu_s3c4510b_name, . - cpu_s3c4510b_name
-
- .type cpu_s3c4530_name, #object
-cpu_s3c4530_name:
- .asciz "Samsung-S3C4530"
- .size cpu_s3c4530_name, . - cpu_s3c4530_name
-
- .type cpu_netarm_name, #object
-cpu_netarm_name:
- .asciz "NETARM"
- .size cpu_netarm_name, . - cpu_netarm_name
+ string cpu_arch_name, "armv4t"
+ string cpu_elf_name, "v4"
+ string cpu_arm7tdmi_name, "ARM7TDMI"
+ string cpu_triscenda7_name, "Triscend-A7x"
+ string cpu_at91_name, "Atmel-AT91M40xxx"
+ string cpu_s3c3410_name, "Samsung-S3C3410"
+ string cpu_s3c44b0x_name, "Samsung-S3C44B0x"
+ string cpu_s3c4510b_name, "Samsung-S3C4510B"
+ string cpu_s3c4530_name, "Samsung-S3C4530"
+ string cpu_netarm_name, "NETARM"
.align
.section ".proc.info.init", #alloc, #execinstr
- .type __arm7tdmi_proc_info, #object
-__arm7tdmi_proc_info:
- .long 0x41007700
- .long 0xfff8ff00
- .long 0
- .long 0
- b __arm7tdmi_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_26BIT
- .long cpu_arm7tdmi_name
- .long arm7tdmi_processor_functions
- .long 0
- .long 0
- .long v4_cache_fns
- .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
-
- .type __triscenda7_proc_info, #object
-__triscenda7_proc_info:
- .long 0x0001d2ff
- .long 0x0001ffff
- .long 0
- .long 0
- b __arm7tdmi_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
- .long cpu_triscenda7_name
- .long arm7tdmi_processor_functions
- .long 0
- .long 0
- .long v4_cache_fns
- .size __triscenda7_proc_info, . - __triscenda7_proc_info
-
- .type __at91_proc_info, #object
-__at91_proc_info:
- .long 0x14000040
- .long 0xfff000e0
- .long 0
- .long 0
- b __arm7tdmi_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
- .long cpu_at91_name
- .long arm7tdmi_processor_functions
- .long 0
- .long 0
- .long v4_cache_fns
- .size __at91_proc_info, . - __at91_proc_info
-
- .type __s3c4510b_proc_info, #object
-__s3c4510b_proc_info:
- .long 0x36365000
- .long 0xfffff000
- .long 0
- .long 0
- b __arm7tdmi_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
- .long cpu_s3c4510b_name
- .long arm7tdmi_processor_functions
- .long 0
- .long 0
- .long v4_cache_fns
- .size __s3c4510b_proc_info, . - __s3c4510b_proc_info
-
- .type __s3c4530_proc_info, #object
-__s3c4530_proc_info:
- .long 0x4c000000
- .long 0xfff000e0
- .long 0
- .long 0
- b __arm7tdmi_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
- .long cpu_s3c4530_name
- .long arm7tdmi_processor_functions
- .long 0
- .long 0
- .long v4_cache_fns
- .size __s3c4530_proc_info, . - __s3c4530_proc_info
-
- .type __s3c3410_proc_info, #object
-__s3c3410_proc_info:
- .long 0x34100000
- .long 0xffff0000
- .long 0
- .long 0
- b __arm7tdmi_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
- .long cpu_s3c3410_name
- .long arm7tdmi_processor_functions
- .long 0
- .long 0
- .long v4_cache_fns
- .size __s3c3410_proc_info, . - __s3c3410_proc_info
-
- .type __s3c44b0x_proc_info, #object
-__s3c44b0x_proc_info:
- .long 0x44b00000
- .long 0xffff0000
+.macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
+ extra_hwcaps=0
+ .type __\name\()_proc_info, #object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
.long 0
.long 0
b __arm7tdmi_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
- .long cpu_s3c44b0x_name
+ .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps )
+ .long \cpu_name
.long arm7tdmi_processor_functions
.long 0
.long 0
.long v4_cache_fns
- .size __s3c44b0x_proc_info, . - __s3c44b0x_proc_info
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
+
+ arm7tdmi_proc_info arm7tdmi, 0x41007700, 0xfff8ff00, \
+ cpu_arm7tdmi_name
+ arm7tdmi_proc_info triscenda7, 0x0001d2ff, 0x0001ffff, \
+ cpu_triscenda7_name, extra_hwcaps=HWCAP_THUMB
+ arm7tdmi_proc_info at91, 0x14000040, 0xfff000e0, \
+ cpu_at91_name, extra_hwcaps=HWCAP_THUMB
+ arm7tdmi_proc_info s3c4510b, 0x36365000, 0xfffff000, \
+ cpu_s3c4510b_name, extra_hwcaps=HWCAP_THUMB
+ arm7tdmi_proc_info s3c4530, 0x4c000000, 0xfff000e0, \
+ cpu_s3c4530_name, extra_hwcaps=HWCAP_THUMB
+ arm7tdmi_proc_info s3c3410, 0x34100000, 0xffff0000, \
+ cpu_s3c3410_name, extra_hwcaps=HWCAP_THUMB
+ arm7tdmi_proc_info s3c44b0x, 0x44b00000, 0xffff0000, \
+ cpu_s3c44b0x_name, extra_hwcaps=HWCAP_THUMB
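
For clarity on the hwcap handling: extra_hwcaps defaults to 0, so the plain arm7tdmi entry keeps the original HWCAP_SWP | HWCAP_26BIT capability word, while the variants that previously also advertised Thumb pass extra_hwcaps=HWCAP_THUMB and their elf_hwcap slot is expected to assemble to, for example:

	.long	HWCAP_SWP | HWCAP_26BIT | ( HWCAP_THUMB )	@ triscenda7 et al.

which matches the HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT values in the deleted per-CPU records.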
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index bf8a1d1cccb6..92bd102e3982 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -315,18 +315,8 @@ ENTRY(arm920_dma_unmap_area)
mov pc, lr
ENDPROC(arm920_dma_unmap_area)
-ENTRY(arm920_cache_fns)
- .long arm920_flush_icache_all
- .long arm920_flush_kern_cache_all
- .long arm920_flush_user_cache_all
- .long arm920_flush_user_cache_range
- .long arm920_coherent_kern_range
- .long arm920_coherent_user_range
- .long arm920_flush_kern_dcache_area
- .long arm920_dma_map_area
- .long arm920_dma_unmap_area
- .long arm920_dma_flush_range
-
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm920
#endif
@@ -416,9 +406,6 @@ ENTRY(cpu_arm920_do_resume)
PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
b cpu_resume_mmu
ENDPROC(cpu_arm920_do_resume)
-#else
-#define cpu_arm920_do_suspend 0
-#define cpu_arm920_do_resume 0
#endif
__CPUINIT
@@ -450,43 +437,14 @@ arm920_crval:
crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
__INITDATA
-
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm920_processor_functions, #object
-arm920_processor_functions:
- .word v4t_early_abort
- .word legacy_pabort
- .word cpu_arm920_proc_init
- .word cpu_arm920_proc_fin
- .word cpu_arm920_reset
- .word cpu_arm920_do_idle
- .word cpu_arm920_dcache_clean_area
- .word cpu_arm920_switch_mm
- .word cpu_arm920_set_pte_ext
- .word cpu_arm920_suspend_size
- .word cpu_arm920_do_suspend
- .word cpu_arm920_do_resume
- .size arm920_processor_functions, . - arm920_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4t"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm920_name, #object
-cpu_arm920_name:
- .asciz "ARM920T"
- .size cpu_arm920_name, . - cpu_arm920_name
+ string cpu_arch_name, "armv4t"
+ string cpu_elf_name, "v4"
+ string cpu_arm920_name, "ARM920T"
.align
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 95ba1fc56e4d..490e18833857 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -317,18 +317,8 @@ ENTRY(arm922_dma_unmap_area)
mov pc, lr
ENDPROC(arm922_dma_unmap_area)
-ENTRY(arm922_cache_fns)
- .long arm922_flush_icache_all
- .long arm922_flush_kern_cache_all
- .long arm922_flush_user_cache_all
- .long arm922_flush_user_cache_range
- .long arm922_coherent_kern_range
- .long arm922_coherent_user_range
- .long arm922_flush_kern_dcache_area
- .long arm922_dma_map_area
- .long arm922_dma_unmap_area
- .long arm922_dma_flush_range
-
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm922
#endif
@@ -420,43 +410,14 @@ arm922_crval:
crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
__INITDATA
-
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm922_processor_functions, #object
-arm922_processor_functions:
- .word v4t_early_abort
- .word legacy_pabort
- .word cpu_arm922_proc_init
- .word cpu_arm922_proc_fin
- .word cpu_arm922_reset
- .word cpu_arm922_do_idle
- .word cpu_arm922_dcache_clean_area
- .word cpu_arm922_switch_mm
- .word cpu_arm922_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm922_processor_functions, . - arm922_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4t"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm922_name, #object
-cpu_arm922_name:
- .asciz "ARM922T"
- .size cpu_arm922_name, . - cpu_arm922_name
+ string cpu_arch_name, "armv4t"
+ string cpu_elf_name, "v4"
+ string cpu_arm922_name, "ARM922T"
.align
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 541e4774eea1..51d494be057e 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -372,17 +372,8 @@ ENTRY(arm925_dma_unmap_area)
mov pc, lr
ENDPROC(arm925_dma_unmap_area)
-ENTRY(arm925_cache_fns)
- .long arm925_flush_icache_all
- .long arm925_flush_kern_cache_all
- .long arm925_flush_user_cache_all
- .long arm925_flush_user_cache_range
- .long arm925_coherent_kern_range
- .long arm925_coherent_user_range
- .long arm925_flush_kern_dcache_area
- .long arm925_dma_map_area
- .long arm925_dma_unmap_area
- .long arm925_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm925
ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -487,52 +478,24 @@ arm925_crval:
crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130
__INITDATA
-
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm925_processor_functions, #object
-arm925_processor_functions:
- .word v4t_early_abort
- .word legacy_pabort
- .word cpu_arm925_proc_init
- .word cpu_arm925_proc_fin
- .word cpu_arm925_reset
- .word cpu_arm925_do_idle
- .word cpu_arm925_dcache_clean_area
- .word cpu_arm925_switch_mm
- .word cpu_arm925_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size arm925_processor_functions, . - arm925_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4t"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm925_name, #object
-cpu_arm925_name:
- .asciz "ARM925T"
- .size cpu_arm925_name, . - cpu_arm925_name
+ string cpu_arch_name, "armv4t"
+ string cpu_elf_name, "v4"
+ string cpu_arm925_name, "ARM925T"
.align
.section ".proc.info.init", #alloc, #execinstr
- .type __arm925_proc_info,#object
-__arm925_proc_info:
- .long 0x54029250
- .long 0xfffffff0
+.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
+ .type __\name\()_proc_info,#object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
@@ -550,27 +513,8 @@ __arm925_proc_info:
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm925_cache_fns
- .size __arm925_proc_info, . - __arm925_proc_info
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
- .type __arm915_proc_info,#object
-__arm915_proc_info:
- .long 0x54029150
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __arm925_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
- .long cpu_arm925_name
- .long arm925_processor_functions
- .long v4wbi_tlb_fns
- .long v4wb_user_fns
- .long arm925_cache_fns
- .size __arm925_proc_info, . - __arm925_proc_info
+ arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name
+ arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 0ed85d930c09..2bbcf053dffd 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -335,17 +335,8 @@ ENTRY(arm926_dma_unmap_area)
mov pc, lr
ENDPROC(arm926_dma_unmap_area)
-ENTRY(arm926_cache_fns)
- .long arm926_flush_icache_all
- .long arm926_flush_kern_cache_all
- .long arm926_flush_user_cache_all
- .long arm926_flush_user_cache_range
- .long arm926_coherent_kern_range
- .long arm926_coherent_user_range
- .long arm926_flush_kern_dcache_area
- .long arm926_dma_map_area
- .long arm926_dma_unmap_area
- .long arm926_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm926
ENTRY(cpu_arm926_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -430,9 +421,6 @@ ENTRY(cpu_arm926_do_resume)
PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
b cpu_resume_mmu
ENDPROC(cpu_arm926_do_resume)
-#else
-#define cpu_arm926_do_suspend 0
-#define cpu_arm926_do_resume 0
#endif
__CPUINIT
@@ -475,42 +463,14 @@ arm926_crval:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm926_processor_functions, #object
-arm926_processor_functions:
- .word v5tj_early_abort
- .word legacy_pabort
- .word cpu_arm926_proc_init
- .word cpu_arm926_proc_fin
- .word cpu_arm926_reset
- .word cpu_arm926_do_idle
- .word cpu_arm926_dcache_clean_area
- .word cpu_arm926_switch_mm
- .word cpu_arm926_set_pte_ext
- .word cpu_arm926_suspend_size
- .word cpu_arm926_do_suspend
- .word cpu_arm926_do_resume
- .size arm926_processor_functions, . - arm926_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm926, dabort=v5tj_early_abort, pabort=legacy_pabort, suspend=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5tej"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm926_name, #object
-cpu_arm926_name:
- .asciz "ARM926EJ-S"
- .size cpu_arm926_name, . - cpu_arm926_name
+ string cpu_arch_name, "armv5tej"
+ string cpu_elf_name, "v5"
+ string cpu_arm926_name, "ARM926EJ-S"
.align
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 26aea3f71c26..ac750d506153 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -264,17 +264,8 @@ ENTRY(arm940_dma_unmap_area)
mov pc, lr
ENDPROC(arm940_dma_unmap_area)
-ENTRY(arm940_cache_fns)
- .long arm940_flush_icache_all
- .long arm940_flush_kern_cache_all
- .long arm940_flush_user_cache_all
- .long arm940_flush_user_cache_range
- .long arm940_coherent_kern_range
- .long arm940_coherent_user_range
- .long arm940_flush_kern_dcache_area
- .long arm940_dma_map_area
- .long arm940_dma_unmap_area
- .long arm940_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm940
__CPUINIT
@@ -348,42 +339,14 @@ __arm940_setup:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm940_processor_functions, #object
-ENTRY(arm940_processor_functions)
- .word nommu_early_abort
- .word legacy_pabort
- .word cpu_arm940_proc_init
- .word cpu_arm940_proc_fin
- .word cpu_arm940_reset
- .word cpu_arm940_do_idle
- .word cpu_arm940_dcache_clean_area
- .word cpu_arm940_switch_mm
- .word 0 @ cpu_*_set_pte
- .word 0
- .word 0
- .word 0
- .size arm940_processor_functions, . - arm940_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
-.type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4t"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm940_name, #object
-cpu_arm940_name:
- .ascii "ARM940T"
- .size cpu_arm940_name, . - cpu_arm940_name
+ string cpu_arch_name, "armv4t"
+ string cpu_elf_name, "v4"
+ string cpu_arm940_name, "ARM940T"
.align
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 8063345406fe..f8f7ea34bfc5 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -306,18 +306,8 @@ ENTRY(arm946_dma_unmap_area)
mov pc, lr
ENDPROC(arm946_dma_unmap_area)
-ENTRY(arm946_cache_fns)
- .long arm946_flush_icache_all
- .long arm946_flush_kern_cache_all
- .long arm946_flush_user_cache_all
- .long arm946_flush_user_cache_range
- .long arm946_coherent_kern_range
- .long arm946_coherent_user_range
- .long arm946_flush_kern_dcache_area
- .long arm946_dma_map_area
- .long arm946_dma_unmap_area
- .long arm946_dma_flush_range
-
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions arm946
ENTRY(cpu_arm946_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -403,43 +393,14 @@ __arm946_setup:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm946_processor_functions, #object
-ENTRY(arm946_processor_functions)
- .word nommu_early_abort
- .word legacy_pabort
- .word cpu_arm946_proc_init
- .word cpu_arm946_proc_fin
- .word cpu_arm946_reset
- .word cpu_arm946_do_idle
-
- .word cpu_arm946_dcache_clean_area
- .word cpu_arm946_switch_mm
- .word 0 @ cpu_*_set_pte
- .word 0
- .word 0
- .word 0
- .size arm946_processor_functions, . - arm946_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5te"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5t"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm946_name, #object
-cpu_arm946_name:
- .ascii "ARM946E-S"
- .size cpu_arm946_name, . - cpu_arm946_name
+ string cpu_arch_name, "armv5te"
+ string cpu_elf_name, "v5t"
+ string cpu_arm946_name, "ARM946E-S"
.align
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 546b54da1005..2120f9e2af7f 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -17,6 +17,8 @@
#include <asm/pgtable.h>
#include <asm/ptrace.h>
+#include "proc-macros.S"
+
.text
/*
* cpu_arm9tdmi_proc_init()
@@ -55,82 +57,38 @@ __arm9tdmi_setup:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type arm9tdmi_processor_functions, #object
-ENTRY(arm9tdmi_processor_functions)
- .word nommu_early_abort
- .word legacy_pabort
- .word cpu_arm9tdmi_proc_init
- .word cpu_arm9tdmi_proc_fin
- .word cpu_arm9tdmi_reset
- .word cpu_arm9tdmi_do_idle
- .word cpu_arm9tdmi_dcache_clean_area
- .word cpu_arm9tdmi_switch_mm
- .word 0 @ cpu_*_set_pte
- .word 0
- .word 0
- .word 0
- .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions arm9tdmi, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4t"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_arm9tdmi_name, #object
-cpu_arm9tdmi_name:
- .asciz "ARM9TDMI"
- .size cpu_arm9tdmi_name, . - cpu_arm9tdmi_name
-
- .type cpu_p2001_name, #object
-cpu_p2001_name:
- .asciz "P2001"
- .size cpu_p2001_name, . - cpu_p2001_name
+ string cpu_arch_name, "armv4t"
+ string cpu_elf_name, "v4"
+ string cpu_arm9tdmi_name, "ARM9TDMI"
+ string cpu_p2001_name, "P2001"
.align
.section ".proc.info.init", #alloc, #execinstr
- .type __arm9tdmi_proc_info, #object
-__arm9tdmi_proc_info:
- .long 0x41009900
- .long 0xfff8ff00
+.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
+ .type __\name\()_proc_info, #object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
.long 0
.long 0
b __arm9tdmi_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
- .long cpu_arm9tdmi_name
+ .long \cpu_name
.long arm9tdmi_processor_functions
.long 0
.long 0
.long v4_cache_fns
- .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
- .type __p2001_proc_info, #object
-__p2001_proc_info:
- .long 0x41029000
- .long 0xffffffff
- .long 0
- .long 0
- b __arm9tdmi_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
- .long cpu_p2001_name
- .long arm9tdmi_processor_functions
- .long 0
- .long 0
- .long v4_cache_fns
- .size __p2001_proc_info, . - __p2001_proc_info
+ arm9tdmi_proc_info arm9tdmi, 0x41009900, 0xfff8ff00, cpu_arm9tdmi_name
+ arm9tdmi_proc_info p2001, 0x41029000, 0xffffffff, cpu_p2001_name
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index fc2a4ae15cf4..4c7a5710472b 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -180,42 +180,14 @@ fa526_cr1_set:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type fa526_processor_functions, #object
-fa526_processor_functions:
- .word v4_early_abort
- .word legacy_pabort
- .word cpu_fa526_proc_init
- .word cpu_fa526_proc_fin
- .word cpu_fa526_reset
- .word cpu_fa526_do_idle
- .word cpu_fa526_dcache_clean_area
- .word cpu_fa526_switch_mm
- .word cpu_fa526_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size fa526_processor_functions, . - fa526_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions fa526, dabort=v4_early_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_fa526_name, #object
-cpu_fa526_name:
- .asciz "FA526"
- .size cpu_fa526_name, . - cpu_fa526_name
+ string cpu_arch_name, "armv4"
+ string cpu_elf_name, "v4"
+ string cpu_fa526_name, "FA526"
.align
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index d3883eed7a4a..8a6c2f78c1c3 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -411,29 +411,28 @@ ENTRY(feroceon_dma_unmap_area)
mov pc, lr
ENDPROC(feroceon_dma_unmap_area)
-ENTRY(feroceon_cache_fns)
- .long feroceon_flush_icache_all
- .long feroceon_flush_kern_cache_all
- .long feroceon_flush_user_cache_all
- .long feroceon_flush_user_cache_range
- .long feroceon_coherent_kern_range
- .long feroceon_coherent_user_range
- .long feroceon_flush_kern_dcache_area
- .long feroceon_dma_map_area
- .long feroceon_dma_unmap_area
- .long feroceon_dma_flush_range
-
-ENTRY(feroceon_range_cache_fns)
- .long feroceon_flush_icache_all
- .long feroceon_flush_kern_cache_all
- .long feroceon_flush_user_cache_all
- .long feroceon_flush_user_cache_range
- .long feroceon_coherent_kern_range
- .long feroceon_coherent_user_range
- .long feroceon_range_flush_kern_dcache_area
- .long feroceon_range_dma_map_area
- .long feroceon_dma_unmap_area
- .long feroceon_range_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions feroceon
+
+.macro range_alias basename
+ .globl feroceon_range_\basename
+ .type feroceon_range_\basename , %function
+ .equ feroceon_range_\basename , feroceon_\basename
+.endm
+
+/*
+ * Most of the cache functions are unchanged for this case.
+ * Export suitable alias symbols for the unchanged functions:
+ */
+ range_alias flush_icache_all
+ range_alias flush_user_cache_all
+ range_alias flush_kern_cache_all
+ range_alias flush_user_cache_range
+ range_alias coherent_kern_range
+ range_alias coherent_user_range
+ range_alias dma_unmap_area
+
+ define_cache_functions feroceon_range
.align 5
ENTRY(cpu_feroceon_dcache_clean_area)
@@ -539,93 +538,27 @@ feroceon_crval:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type feroceon_processor_functions, #object
-feroceon_processor_functions:
- .word v5t_early_abort
- .word legacy_pabort
- .word cpu_feroceon_proc_init
- .word cpu_feroceon_proc_fin
- .word cpu_feroceon_reset
- .word cpu_feroceon_do_idle
- .word cpu_feroceon_dcache_clean_area
- .word cpu_feroceon_switch_mm
- .word cpu_feroceon_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size feroceon_processor_functions, . - feroceon_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5te"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_feroceon_name, #object
-cpu_feroceon_name:
- .asciz "Feroceon"
- .size cpu_feroceon_name, . - cpu_feroceon_name
-
- .type cpu_88fr531_name, #object
-cpu_88fr531_name:
- .asciz "Feroceon 88FR531-vd"
- .size cpu_88fr531_name, . - cpu_88fr531_name
-
- .type cpu_88fr571_name, #object
-cpu_88fr571_name:
- .asciz "Feroceon 88FR571-vd"
- .size cpu_88fr571_name, . - cpu_88fr571_name
-
- .type cpu_88fr131_name, #object
-cpu_88fr131_name:
- .asciz "Feroceon 88FR131"
- .size cpu_88fr131_name, . - cpu_88fr131_name
+ string cpu_arch_name, "armv5te"
+ string cpu_elf_name, "v5"
+ string cpu_feroceon_name, "Feroceon"
+ string cpu_88fr531_name, "Feroceon 88FR531-vd"
+ string cpu_88fr571_name, "Feroceon 88FR571-vd"
+ string cpu_88fr131_name, "Feroceon 88FR131"
.align
.section ".proc.info.init", #alloc, #execinstr
-#ifdef CONFIG_CPU_FEROCEON_OLD_ID
- .type __feroceon_old_id_proc_info,#object
-__feroceon_old_id_proc_info:
- .long 0x41009260
- .long 0xff00fff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __feroceon_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_feroceon_name
- .long feroceon_processor_functions
- .long v4wbi_tlb_fns
- .long feroceon_user_fns
- .long feroceon_cache_fns
- .size __feroceon_old_id_proc_info, . - __feroceon_old_id_proc_info
-#endif
-
- .type __88fr531_proc_info,#object
-__88fr531_proc_info:
- .long 0x56055310
- .long 0xfffffff0
+.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
+ .type __\name\()_proc_info,#object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
@@ -640,59 +573,22 @@ __88fr531_proc_info:
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_88fr531_name
+ .long \cpu_name
.long feroceon_processor_functions
.long v4wbi_tlb_fns
.long feroceon_user_fns
- .long feroceon_cache_fns
- .size __88fr531_proc_info, . - __88fr531_proc_info
+ .long \cache
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
- .type __88fr571_proc_info,#object
-__88fr571_proc_info:
- .long 0x56155710
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __feroceon_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_88fr571_name
- .long feroceon_processor_functions
- .long v4wbi_tlb_fns
- .long feroceon_user_fns
- .long feroceon_range_cache_fns
- .size __88fr571_proc_info, . - __88fr571_proc_info
+#ifdef CONFIG_CPU_FEROCEON_OLD_ID
+ feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
+ cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
+#endif
- .type __88fr131_proc_info,#object
-__88fr131_proc_info:
- .long 0x56251310
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_BIT4 | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __feroceon_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_88fr131_name
- .long feroceon_processor_functions
- .long v4wbi_tlb_fns
- .long feroceon_user_fns
- .long feroceon_range_cache_fns
- .size __88fr131_proc_info, . - __88fr131_proc_info
+ feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
+ cache=feroceon_cache_fns
+ feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
+ cache=feroceon_range_cache_fns
+ feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
+ cache=feroceon_range_cache_fns
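
The feroceon_range conversion above deserves a brief illustration: define_cache_functions feroceon_range needs a full set of feroceon_range_* symbols, but only the flush_kern_dcache_area, dma_map_area and dma_flush_range operations actually differ, so range_alias publishes the remaining entries as aliases of the plain feroceon functions. A single invocation is assumed to amount to:

	@ assumed expansion of: range_alias coherent_kern_range
	.globl	feroceon_range_coherent_kern_range
	.type	feroceon_range_coherent_kern_range, %function
	.equ	feroceon_range_coherent_kern_range, feroceon_coherent_kern_range

so the shared implementation is emitted once and both cpu_cache_fns tables end up pointing at it.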
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 34261f9486b9..307a4def8d3a 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -254,3 +254,71 @@
mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
mcr p15, 0, ip, c7, c10, 4 @ data write barrier
.endm
+
+.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
+ .type \name\()_processor_functions, #object
+ .align 2
+ENTRY(\name\()_processor_functions)
+ .word \dabort
+ .word \pabort
+ .word cpu_\name\()_proc_init
+ .word cpu_\name\()_proc_fin
+ .word cpu_\name\()_reset
+ .word cpu_\name\()_do_idle
+ .word cpu_\name\()_dcache_clean_area
+ .word cpu_\name\()_switch_mm
+
+ .if \nommu
+ .word 0
+ .else
+ .word cpu_\name\()_set_pte_ext
+ .endif
+
+ .if \suspend
+ .word cpu_\name\()_suspend_size
+#ifdef CONFIG_PM_SLEEP
+ .word cpu_\name\()_do_suspend
+ .word cpu_\name\()_do_resume
+#else
+ .word 0
+ .word 0
+#endif
+ .else
+ .word 0
+ .word 0
+ .word 0
+ .endif
+
+ .size \name\()_processor_functions, . - \name\()_processor_functions
+.endm
+
+.macro define_cache_functions name:req
+ .align 2
+ .type \name\()_cache_fns, #object
+ENTRY(\name\()_cache_fns)
+ .long \name\()_flush_icache_all
+ .long \name\()_flush_kern_cache_all
+ .long \name\()_flush_user_cache_all
+ .long \name\()_flush_user_cache_range
+ .long \name\()_coherent_kern_range
+ .long \name\()_coherent_user_range
+ .long \name\()_flush_kern_dcache_area
+ .long \name\()_dma_map_area
+ .long \name\()_dma_unmap_area
+ .long \name\()_dma_flush_range
+ .size \name\()_cache_fns, . - \name\()_cache_fns
+.endm
+
+.macro define_tlb_functions name:req, flags_up:req, flags_smp
+ .type \name\()_tlb_fns, #object
+ENTRY(\name\()_tlb_fns)
+ .long \name\()_flush_user_tlb_range
+ .long \name\()_flush_kern_tlb_range
+ .ifnb \flags_smp
+ ALT_SMP(.long \flags_smp )
+ ALT_UP(.long \flags_up )
+ .else
+ .long \flags_up
+ .endif
+ .size \name\()_tlb_fns, . - \name\()_tlb_fns
+.endm
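
To make the new helpers concrete, here is a minimal sketch of what one of the suspend-capable users is assumed to expand to (taking the arm920 invocation from proc-arm920.S, with CONFIG_PM_SLEEP enabled):

	.type	arm920_processor_functions, #object
	.align	2
ENTRY(arm920_processor_functions)
	.word	v4t_early_abort			@ dabort
	.word	legacy_pabort			@ pabort
	.word	cpu_arm920_proc_init
	.word	cpu_arm920_proc_fin
	.word	cpu_arm920_reset
	.word	cpu_arm920_do_idle
	.word	cpu_arm920_dcache_clean_area
	.word	cpu_arm920_switch_mm
	.word	cpu_arm920_set_pte_ext		@ nommu=0
	.word	cpu_arm920_suspend_size		@ suspend=1
	.word	cpu_arm920_do_suspend
	.word	cpu_arm920_do_resume
	.size	arm920_processor_functions, . - arm920_processor_functions

When CONFIG_PM_SLEEP is disabled the macro substitutes zero words for the do_suspend/do_resume slots itself, which is why the per-file "#define cpu_*_do_suspend 0" fallbacks can be dropped in the hunks above. define_tlb_functions follows the same pattern, presumably for the corresponding tlb-*.S tables; its users fall outside the hunks shown here.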
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 9d4f2ae63370..db52b0fb14a0 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -93,6 +93,17 @@ ENTRY(cpu_mohawk_do_idle)
mov pc, lr
/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(mohawk_flush_icache_all)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mov pc, lr
+ENDPROC(mohawk_flush_icache_all)
+
+/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
@@ -288,16 +299,8 @@ ENTRY(mohawk_dma_unmap_area)
mov pc, lr
ENDPROC(mohawk_dma_unmap_area)
-ENTRY(mohawk_cache_fns)
- .long mohawk_flush_kern_cache_all
- .long mohawk_flush_user_cache_all
- .long mohawk_flush_user_cache_range
- .long mohawk_coherent_kern_range
- .long mohawk_coherent_user_range
- .long mohawk_flush_kern_dcache_area
- .long mohawk_dma_map_area
- .long mohawk_dma_unmap_area
- .long mohawk_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions mohawk
ENTRY(cpu_mohawk_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -373,42 +376,14 @@ mohawk_crval:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
- .type mohawk_processor_functions, #object
-mohawk_processor_functions:
- .word v5t_early_abort
- .word legacy_pabort
- .word cpu_mohawk_proc_init
- .word cpu_mohawk_proc_fin
- .word cpu_mohawk_reset
- .word cpu_mohawk_do_idle
- .word cpu_mohawk_dcache_clean_area
- .word cpu_mohawk_switch_mm
- .word cpu_mohawk_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size mohawk_processor_functions, . - mohawk_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5te"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_mohawk_name, #object
-cpu_mohawk_name:
- .asciz "Marvell 88SV331x"
- .size cpu_mohawk_name, . - cpu_mohawk_name
+ string cpu_arch_name, "armv5te"
+ string cpu_elf_name, "v5"
+ string cpu_mohawk_name, "Marvell 88SV331x"
.align
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 46f09ed16b98..d50ada26edd6 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -187,43 +187,14 @@ sa110_crval:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
-
- .type sa110_processor_functions, #object
-ENTRY(sa110_processor_functions)
- .word v4_early_abort
- .word legacy_pabort
- .word cpu_sa110_proc_init
- .word cpu_sa110_proc_fin
- .word cpu_sa110_reset
- .word cpu_sa110_do_idle
- .word cpu_sa110_dcache_clean_area
- .word cpu_sa110_switch_mm
- .word cpu_sa110_set_pte_ext
- .word 0
- .word 0
- .word 0
- .size sa110_processor_functions, . - sa110_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions sa110, dabort=v4_early_abort, pabort=legacy_pabort
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_sa110_name, #object
-cpu_sa110_name:
- .asciz "StrongARM-110"
- .size cpu_sa110_name, . - cpu_sa110_name
+ string cpu_arch_name, "armv4"
+ string cpu_elf_name, "v4"
+ string cpu_sa110_name, "StrongARM-110"
.align
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 184a9c997e36..07219c2ae114 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -34,7 +34,7 @@
*/
#define DCACHELINESIZE 32
- __INIT
+ .section .text
/*
* cpu_sa1100_proc_init()
@@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init)
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
mov pc, lr
- .section .text
-
/*
* cpu_sa1100_proc_fin()
*
@@ -200,9 +198,6 @@ ENTRY(cpu_sa1100_do_resume)
PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
b cpu_resume_mmu
ENDPROC(cpu_sa1100_do_resume)
-#else
-#define cpu_sa1100_do_suspend 0
-#define cpu_sa1100_do_resume 0
#endif
__CPUINIT
@@ -236,59 +231,28 @@ sa1100_crval:
__INITDATA
/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
-
-/*
* SA1100 and SA1110 share the same function calls
*/
- .type sa1100_processor_functions, #object
-ENTRY(sa1100_processor_functions)
- .word v4_early_abort
- .word legacy_pabort
- .word cpu_sa1100_proc_init
- .word cpu_sa1100_proc_fin
- .word cpu_sa1100_reset
- .word cpu_sa1100_do_idle
- .word cpu_sa1100_dcache_clean_area
- .word cpu_sa1100_switch_mm
- .word cpu_sa1100_set_pte_ext
- .word cpu_sa1100_suspend_size
- .word cpu_sa1100_do_suspend
- .word cpu_sa1100_do_resume
- .size sa1100_processor_functions, . - sa1100_processor_functions
- .section ".rodata"
-
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv4"
- .size cpu_arch_name, . - cpu_arch_name
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v4"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_sa1100_name, #object
-cpu_sa1100_name:
- .asciz "StrongARM-1100"
- .size cpu_sa1100_name, . - cpu_sa1100_name
+ .section ".rodata"
- .type cpu_sa1110_name, #object
-cpu_sa1110_name:
- .asciz "StrongARM-1110"
- .size cpu_sa1110_name, . - cpu_sa1110_name
+ string cpu_arch_name, "armv4"
+ string cpu_elf_name, "v4"
+ string cpu_sa1100_name, "StrongARM-1100"
+ string cpu_sa1110_name, "StrongARM-1110"
.align
.section ".proc.info.init", #alloc, #execinstr
- .type __sa1100_proc_info,#object
-__sa1100_proc_info:
- .long 0x4401a110
- .long 0xfffffff0
+.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
+ .type __\name\()_proc_info,#object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
@@ -301,32 +265,13 @@ __sa1100_proc_info:
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
- .long cpu_sa1100_name
+ .long \cpu_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
- .size __sa1100_proc_info, . - __sa1100_proc_info
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
- .type __sa1110_proc_info,#object
-__sa1110_proc_info:
- .long 0x6901b110
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __sa1100_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
- .long cpu_sa1110_name
- .long sa1100_processor_functions
- .long v4wb_tlb_fns
- .long v4_mc_user_fns
- .long v4wb_cache_fns
- .size __sa1110_proc_info, . - __sa1110_proc_info
+ sa1100_proc_info sa1100, 0x4401a110, 0xfffffff0, cpu_sa1100_name
+ sa1100_proc_info sa1110, 0x6901b110, 0xfffffff0, cpu_sa1110_name
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 1d2b8451bf25..219138d2f158 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -56,6 +56,11 @@ ENTRY(cpu_v6_proc_fin)
*/
.align 5
ENTRY(cpu_v6_reset)
+ mrc p15, 0, r1, c1, c0, 0 @ ctrl register
+ bic r1, r1, #0x1 @ ...............m
+ mcr p15, 0, r1, c1, c0, 0 @ disable MMU
+ mov r1, #0
+ mcr p15, 0, r1, c7, c5, 4 @ ISB
mov pc, r0
/*
@@ -164,16 +169,9 @@ ENDPROC(cpu_v6_do_resume)
cpu_resume_l1_flags:
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
-#else
-#define cpu_v6_do_suspend 0
-#define cpu_v6_do_resume 0
#endif
-
- .type cpu_v6_name, #object
-cpu_v6_name:
- .asciz "ARMv6-compatible processor"
- .size cpu_v6_name, . - cpu_v6_name
+ string cpu_v6_name, "ARMv6-compatible processor"
.align
@@ -239,33 +237,13 @@ v6_crval:
__INITDATA
- .type v6_processor_functions, #object
-ENTRY(v6_processor_functions)
- .word v6_early_abort
- .word v6_pabort
- .word cpu_v6_proc_init
- .word cpu_v6_proc_fin
- .word cpu_v6_reset
- .word cpu_v6_do_idle
- .word cpu_v6_dcache_clean_area
- .word cpu_v6_switch_mm
- .word cpu_v6_set_pte_ext
- .word cpu_v6_suspend_size
- .word cpu_v6_do_suspend
- .word cpu_v6_do_resume
- .size v6_processor_functions, . - v6_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions v6, dabort=v6_early_abort, pabort=v6_pabort, suspend=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv6"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v6"
- .size cpu_elf_name, . - cpu_elf_name
+ string cpu_arch_name, "armv6"
+ string cpu_elf_name, "v6"
.align
.section ".proc.info.init", #alloc, #execinstr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 089c0b5e454f..a30e78542ccf 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -58,9 +58,16 @@ ENDPROC(cpu_v7_proc_fin)
* to what would be the reset vector.
*
* - loc - location to jump to for soft reset
+ *
+ * This code must be executed using a flat identity mapping with
+ * caches disabled.
*/
.align 5
ENTRY(cpu_v7_reset)
+ mrc p15, 0, r1, c1, c0, 0 @ ctrl register
+ bic r1, r1, #0x1 @ ...............m
+ mcr p15, 0, r1, c1, c0, 0 @ disable MMU
+ isb
mov pc, r0
ENDPROC(cpu_v7_reset)
@@ -173,8 +180,7 @@ ENTRY(cpu_v7_set_pte_ext)
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
-cpu_v7_name:
- .ascii "ARMv7 Processor"
+ string cpu_v7_name, "ARMv7 Processor"
.align
/*
@@ -257,9 +263,6 @@ ENDPROC(cpu_v7_do_resume)
cpu_resume_l1_flags:
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
-#else
-#define cpu_v7_do_suspend 0
-#define cpu_v7_do_resume 0
#endif
__CPUINIT
@@ -279,13 +282,20 @@ cpu_resume_l1_flags:
* It is assumed that:
* - cache type register is implemented
*/
+__v7_ca5mp_setup:
__v7_ca9mp_setup:
+ mov r10, #(1 << 0) @ TLB ops broadcasting
+ b 1f
+__v7_ca15mp_setup:
+ mov r10, #0
+1:
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
- orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
- mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
+ orreq r0, r0, #(1 << 6) @ Enable SMP/nAMP mode
+ orreq r0, r0, r10 @ Enable CPU-specific SMP bits
+ mcreq p15, 0, r0, c1, c0, 1
#endif
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
@@ -411,94 +421,75 @@ __v7_setup_stack:
__INITDATA
- .type v7_processor_functions, #object
-ENTRY(v7_processor_functions)
- .word v7_early_abort
- .word v7_pabort
- .word cpu_v7_proc_init
- .word cpu_v7_proc_fin
- .word cpu_v7_reset
- .word cpu_v7_do_idle
- .word cpu_v7_dcache_clean_area
- .word cpu_v7_switch_mm
- .word cpu_v7_set_pte_ext
- .word cpu_v7_suspend_size
- .word cpu_v7_do_suspend
- .word cpu_v7_do_resume
- .size v7_processor_functions, . - v7_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv7"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v7"
- .size cpu_elf_name, . - cpu_elf_name
+ string cpu_arch_name, "armv7"
+ string cpu_elf_name, "v7"
.align
.section ".proc.info.init", #alloc, #execinstr
- .type __v7_ca9mp_proc_info, #object
-__v7_ca9mp_proc_info:
- .long 0x410fc090 @ Required ID value
- .long 0xff0ffff0 @ Mask for ID
- ALT_SMP(.long \
- PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ | \
- PMD_FLAGS_SMP)
- ALT_UP(.long \
- PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ | \
- PMD_FLAGS_UP)
- .long PMD_TYPE_SECT | \
- PMD_SECT_XN | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- W(b) __v7_ca9mp_setup
+ /*
+ * Standard v7 proc info content
+ */
+.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
+ ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP | \mm_mmuflags)
+ ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
+ PMD_FLAGS_UP | \mm_mmuflags)
+ .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \io_mmuflags
+ W(b) \initfunc
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
+ HWCAP_EDSP | HWCAP_TLS | \hwcaps
.long cpu_v7_name
.long v7_processor_functions
.long v7wbi_tlb_fns
.long v6_user_fns
.long v7_cache_fns
+.endm
+
+ /*
+ * ARM Ltd. Cortex A5 processor.
+ */
+ .type __v7_ca5mp_proc_info, #object
+__v7_ca5mp_proc_info:
+ .long 0x410fc050
+ .long 0xff0ffff0
+ __v7_proc __v7_ca5mp_setup
+ .size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
+
+ /*
+ * ARM Ltd. Cortex A9 processor.
+ */
+ .type __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+ .long 0x410fc090
+ .long 0xff0ffff0
+ __v7_proc __v7_ca9mp_setup
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
/*
+ * ARM Ltd. Cortex A15 processor.
+ */
+ .type __v7_ca15mp_proc_info, #object
+__v7_ca15mp_proc_info:
+ .long 0x410fc0f0
+ .long 0xff0ffff0
+ __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+ .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
+
+ /*
* Match any ARMv7 processor core.
*/
.type __v7_proc_info, #object
__v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
- ALT_SMP(.long \
- PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ | \
- PMD_FLAGS_SMP)
- ALT_UP(.long \
- PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ | \
- PMD_FLAGS_UP)
- .long PMD_TYPE_SECT | \
- PMD_SECT_XN | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- W(b) __v7_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
- .long cpu_v7_name
- .long v7_processor_functions
- .long v7wbi_tlb_fns
- .long v6_user_fns
- .long v7_cache_fns
+ __v7_proc __v7_setup
.size __v7_proc_info, . - __v7_proc_info
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 596213699f37..64f1fc7edf0a 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -335,17 +335,8 @@ ENTRY(xsc3_dma_unmap_area)
mov pc, lr
ENDPROC(xsc3_dma_unmap_area)
-ENTRY(xsc3_cache_fns)
- .long xsc3_flush_icache_all
- .long xsc3_flush_kern_cache_all
- .long xsc3_flush_user_cache_all
- .long xsc3_flush_user_cache_range
- .long xsc3_coherent_kern_range
- .long xsc3_coherent_user_range
- .long xsc3_flush_kern_dcache_area
- .long xsc3_dma_map_area
- .long xsc3_dma_unmap_area
- .long xsc3_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions xsc3
ENTRY(cpu_xsc3_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
@@ -454,9 +445,6 @@ ENTRY(cpu_xsc3_do_resume)
ldr r3, =0x542e @ section flags
b cpu_resume_mmu
ENDPROC(cpu_xsc3_do_resume)
-#else
-#define cpu_xsc3_do_suspend 0
-#define cpu_xsc3_do_resume 0
#endif
__CPUINIT
@@ -503,52 +491,24 @@ xsc3_crval:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
-
- .type xsc3_processor_functions, #object
-ENTRY(xsc3_processor_functions)
- .word v5t_early_abort
- .word legacy_pabort
- .word cpu_xsc3_proc_init
- .word cpu_xsc3_proc_fin
- .word cpu_xsc3_reset
- .word cpu_xsc3_do_idle
- .word cpu_xsc3_dcache_clean_area
- .word cpu_xsc3_switch_mm
- .word cpu_xsc3_set_pte_ext
- .word cpu_xsc3_suspend_size
- .word cpu_xsc3_do_suspend
- .word cpu_xsc3_do_resume
- .size xsc3_processor_functions, . - xsc3_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5te"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_xsc3_name, #object
-cpu_xsc3_name:
- .asciz "XScale-V3 based processor"
- .size cpu_xsc3_name, . - cpu_xsc3_name
+ string cpu_arch_name, "armv5te"
+ string cpu_elf_name, "v5"
+ string cpu_xsc3_name, "XScale-V3 based processor"
.align
.section ".proc.info.init", #alloc, #execinstr
- .type __xsc3_proc_info,#object
-__xsc3_proc_info:
- .long 0x69056000
- .long 0xffffe000
+.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
+ .type __\name\()_proc_info,#object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
@@ -566,29 +526,10 @@ __xsc3_proc_info:
.long v4wbi_tlb_fns
.long xsc3_mc_user_fns
.long xsc3_cache_fns
- .size __xsc3_proc_info, . - __xsc3_proc_info
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
-/* Note: PXA935 changed its implementor ID from Intel to Marvell */
+ xsc3_proc_info xsc3, 0x69056000, 0xffffe000
- .type __xsc3_pxa935_proc_info,#object
-__xsc3_pxa935_proc_info:
- .long 0x56056000
- .long 0xffffe000
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xsc3_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_xsc3_name
- .long xsc3_processor_functions
- .long v4wbi_tlb_fns
- .long xsc3_mc_user_fns
- .long xsc3_cache_fns
- .size __xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info
+/* Note: PXA935 changed its implementor ID from Intel to Marvell */
+ xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 42af97664c9d..fbc06e55b87a 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -390,12 +390,12 @@ ENDPROC(xscale_dma_map_area)
* - size - size of region
* - dir - DMA direction
*/
-ENTRY(xscale_dma_a0_map_area)
+ENTRY(xscale_80200_A0_A1_dma_map_area)
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
beq xscale_dma_clean_range
b xscale_dma_flush_range
-ENDPROC(xscale_dma_a0_map_area)
+ENDPROC(xscale_80200_A0_A1_dma_map_area)
/*
* dma_unmap_area(start, size, dir)
@@ -407,17 +407,8 @@ ENTRY(xscale_dma_unmap_area)
mov pc, lr
ENDPROC(xscale_dma_unmap_area)
-ENTRY(xscale_cache_fns)
- .long xscale_flush_icache_all
- .long xscale_flush_kern_cache_all
- .long xscale_flush_user_cache_all
- .long xscale_flush_user_cache_range
- .long xscale_coherent_kern_range
- .long xscale_coherent_user_range
- .long xscale_flush_kern_dcache_area
- .long xscale_dma_map_area
- .long xscale_dma_unmap_area
- .long xscale_dma_flush_range
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions xscale
/*
* On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
@@ -432,16 +423,28 @@ ENTRY(xscale_cache_fns)
* revision January 22, 2003, available at:
* http://www.intel.com/design/iio/specupdt/273415.htm
*/
-ENTRY(xscale_80200_A0_A1_cache_fns)
- .long xscale_flush_kern_cache_all
- .long xscale_flush_user_cache_all
- .long xscale_flush_user_cache_range
- .long xscale_coherent_kern_range
- .long xscale_coherent_user_range
- .long xscale_flush_kern_dcache_area
- .long xscale_dma_a0_map_area
- .long xscale_dma_unmap_area
- .long xscale_dma_flush_range
+.macro a0_alias basename
+ .globl xscale_80200_A0_A1_\basename
+ .type xscale_80200_A0_A1_\basename , %function
+ .equ xscale_80200_A0_A1_\basename , xscale_\basename
+.endm
+
+/*
+ * Most of the cache functions are unchanged for these processor revisions.
+ * Export suitable alias symbols for the unchanged functions:
+ */
+ a0_alias flush_icache_all
+ a0_alias flush_user_cache_all
+ a0_alias flush_kern_cache_all
+ a0_alias flush_user_cache_range
+ a0_alias coherent_kern_range
+ a0_alias coherent_user_range
+ a0_alias flush_kern_dcache_area
+ a0_alias dma_flush_range
+ a0_alias dma_unmap_area
+
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions xscale_80200_A0_A1
ENTRY(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -551,9 +554,6 @@ ENTRY(cpu_xscale_do_resume)
PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
b cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)
-#else
-#define cpu_xscale_do_suspend 0
-#define cpu_xscale_do_resume 0
#endif
__CPUINIT
@@ -587,432 +587,74 @@ xscale_crval:
__INITDATA
-/*
- * Purpose : Function pointers used to access above functions - all calls
- * come through these
- */
-
- .type xscale_processor_functions, #object
-ENTRY(xscale_processor_functions)
- .word v5t_early_abort
- .word legacy_pabort
- .word cpu_xscale_proc_init
- .word cpu_xscale_proc_fin
- .word cpu_xscale_reset
- .word cpu_xscale_do_idle
- .word cpu_xscale_dcache_clean_area
- .word cpu_xscale_switch_mm
- .word cpu_xscale_set_pte_ext
- .word cpu_xscale_suspend_size
- .word cpu_xscale_do_suspend
- .word cpu_xscale_do_resume
- .size xscale_processor_functions, . - xscale_processor_functions
+ @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
+ define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1
.section ".rodata"
- .type cpu_arch_name, #object
-cpu_arch_name:
- .asciz "armv5te"
- .size cpu_arch_name, . - cpu_arch_name
-
- .type cpu_elf_name, #object
-cpu_elf_name:
- .asciz "v5"
- .size cpu_elf_name, . - cpu_elf_name
-
- .type cpu_80200_A0_A1_name, #object
-cpu_80200_A0_A1_name:
- .asciz "XScale-80200 A0/A1"
- .size cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name
-
- .type cpu_80200_name, #object
-cpu_80200_name:
- .asciz "XScale-80200"
- .size cpu_80200_name, . - cpu_80200_name
-
- .type cpu_80219_name, #object
-cpu_80219_name:
- .asciz "XScale-80219"
- .size cpu_80219_name, . - cpu_80219_name
-
- .type cpu_8032x_name, #object
-cpu_8032x_name:
- .asciz "XScale-IOP8032x Family"
- .size cpu_8032x_name, . - cpu_8032x_name
-
- .type cpu_8033x_name, #object
-cpu_8033x_name:
- .asciz "XScale-IOP8033x Family"
- .size cpu_8033x_name, . - cpu_8033x_name
-
- .type cpu_pxa250_name, #object
-cpu_pxa250_name:
- .asciz "XScale-PXA250"
- .size cpu_pxa250_name, . - cpu_pxa250_name
-
- .type cpu_pxa210_name, #object
-cpu_pxa210_name:
- .asciz "XScale-PXA210"
- .size cpu_pxa210_name, . - cpu_pxa210_name
-
- .type cpu_ixp42x_name, #object
-cpu_ixp42x_name:
- .asciz "XScale-IXP42x Family"
- .size cpu_ixp42x_name, . - cpu_ixp42x_name
-
- .type cpu_ixp43x_name, #object
-cpu_ixp43x_name:
- .asciz "XScale-IXP43x Family"
- .size cpu_ixp43x_name, . - cpu_ixp43x_name
-
- .type cpu_ixp46x_name, #object
-cpu_ixp46x_name:
- .asciz "XScale-IXP46x Family"
- .size cpu_ixp46x_name, . - cpu_ixp46x_name
-
- .type cpu_ixp2400_name, #object
-cpu_ixp2400_name:
- .asciz "XScale-IXP2400"
- .size cpu_ixp2400_name, . - cpu_ixp2400_name
-
- .type cpu_ixp2800_name, #object
-cpu_ixp2800_name:
- .asciz "XScale-IXP2800"
- .size cpu_ixp2800_name, . - cpu_ixp2800_name
-
- .type cpu_pxa255_name, #object
-cpu_pxa255_name:
- .asciz "XScale-PXA255"
- .size cpu_pxa255_name, . - cpu_pxa255_name
-
- .type cpu_pxa270_name, #object
-cpu_pxa270_name:
- .asciz "XScale-PXA270"
- .size cpu_pxa270_name, . - cpu_pxa270_name
+ string cpu_arch_name, "armv5te"
+ string cpu_elf_name, "v5"
+
+ string cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
+ string cpu_80200_name, "XScale-80200"
+ string cpu_80219_name, "XScale-80219"
+ string cpu_8032x_name, "XScale-IOP8032x Family"
+ string cpu_8033x_name, "XScale-IOP8033x Family"
+ string cpu_pxa250_name, "XScale-PXA250"
+ string cpu_pxa210_name, "XScale-PXA210"
+ string cpu_ixp42x_name, "XScale-IXP42x Family"
+ string cpu_ixp43x_name, "XScale-IXP43x Family"
+ string cpu_ixp46x_name, "XScale-IXP46x Family"
+ string cpu_ixp2400_name, "XScale-IXP2400"
+ string cpu_ixp2800_name, "XScale-IXP2800"
+ string cpu_pxa255_name, "XScale-PXA255"
+ string cpu_pxa270_name, "XScale-PXA270"
.align
.section ".proc.info.init", #alloc, #execinstr
- .type __80200_A0_A1_proc_info,#object
-__80200_A0_A1_proc_info:
- .long 0x69052000
- .long 0xfffffffe
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_80200_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_80200_A0_A1_cache_fns
- .size __80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info
-
- .type __80200_proc_info,#object
-__80200_proc_info:
- .long 0x69052000
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
+.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
+ .type __\name\()_proc_info,#object
+__\name\()_proc_info:
+ .long \cpu_val
+ .long \cpu_mask
+ .long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
+ .long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_80200_name
+ .long \cpu_name
.long xscale_processor_functions
.long v4wbi_tlb_fns
.long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __80200_proc_info, . - __80200_proc_info
-
- .type __80219_proc_info,#object
-__80219_proc_info:
- .long 0x69052e20
- .long 0xffffffe0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_80219_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __80219_proc_info, . - __80219_proc_info
-
- .type __8032x_proc_info,#object
-__8032x_proc_info:
- .long 0x69052420
- .long 0xfffff7e0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_8032x_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __8032x_proc_info, . - __8032x_proc_info
-
- .type __8033x_proc_info,#object
-__8033x_proc_info:
- .long 0x69054010
- .long 0xfffffd30
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_8033x_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __8033x_proc_info, . - __8033x_proc_info
-
- .type __pxa250_proc_info,#object
-__pxa250_proc_info:
- .long 0x69052100
- .long 0xfffff7f0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_pxa250_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __pxa250_proc_info, . - __pxa250_proc_info
-
- .type __pxa210_proc_info,#object
-__pxa210_proc_info:
- .long 0x69052120
- .long 0xfffff3f0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_pxa210_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __pxa210_proc_info, . - __pxa210_proc_info
-
- .type __ixp2400_proc_info, #object
-__ixp2400_proc_info:
- .long 0x69054190
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_ixp2400_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __ixp2400_proc_info, . - __ixp2400_proc_info
-
- .type __ixp2800_proc_info, #object
-__ixp2800_proc_info:
- .long 0x690541a0
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_ixp2800_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __ixp2800_proc_info, . - __ixp2800_proc_info
-
- .type __ixp42x_proc_info, #object
-__ixp42x_proc_info:
- .long 0x690541c0
- .long 0xffffffc0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_ixp42x_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __ixp42x_proc_info, . - __ixp42x_proc_info
-
- .type __ixp43x_proc_info, #object
-__ixp43x_proc_info:
- .long 0x69054040
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_ixp43x_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __ixp43x_proc_info, . - __ixp43x_proc_info
-
- .type __ixp46x_proc_info, #object
-__ixp46x_proc_info:
- .long 0x69054200
- .long 0xffffff00
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_ixp46x_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __ixp46x_proc_info, . - __ixp46x_proc_info
-
- .type __pxa255_proc_info,#object
-__pxa255_proc_info:
- .long 0x69052d00
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_pxa255_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __pxa255_proc_info, . - __pxa255_proc_info
-
- .type __pxa270_proc_info,#object
-__pxa270_proc_info:
- .long 0x69054110
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
- PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
- b __xscale_setup
- .long cpu_arch_name
- .long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_pxa270_name
- .long xscale_processor_functions
- .long v4wbi_tlb_fns
- .long xscale_mc_user_fns
- .long xscale_cache_fns
- .size __pxa270_proc_info, . - __pxa270_proc_info
-
+ .ifb \cache
+ .long xscale_cache_fns
+ .else
+ .long \cache
+ .endif
+ .size __\name\()_proc_info, . - __\name\()_proc_info
+.endm
+
+ xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \
+ cache=xscale_80200_A0_A1_cache_fns
+ xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
+ xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
+ xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
+ xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
+ xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
+ xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
+ xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
+ xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
+ xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
+ xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
+ xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
+ xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
+ xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S
index 9694f1f6f485..d3ddcf9a76ca 100644
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -46,7 +46,6 @@ ENTRY(fa_flush_user_tlb_range)
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB
mcr p15, 0, r3, c7, c10, 4 @ data write barrier
mov pc, lr
@@ -60,16 +59,11 @@ ENTRY(fa_flush_kern_tlb_range)
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB
mcr p15, 0, r3, c7, c10, 4 @ data write barrier
- mcr p15, 0, r3, c7, c5, 4 @ prefetch flush
+ mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb)
mov pc, lr
__INITDATA
- .type fa_tlb_fns, #object
-ENTRY(fa_tlb_fns)
- .long fa_flush_user_tlb_range
- .long fa_flush_kern_tlb_range
- .long fa_tlb_flags
- .size fa_tlb_fns, . - fa_tlb_fns
+ /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
+ define_tlb_functions fa, fa_tlb_flags
diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S
index c10786ec8e0a..d253995ec4ca 100644
--- a/arch/arm/mm/tlb-v3.S
+++ b/arch/arm/mm/tlb-v3.S
@@ -44,9 +44,5 @@ ENTRY(v3_flush_kern_tlb_range)
__INITDATA
- .type v3_tlb_fns, #object
-ENTRY(v3_tlb_fns)
- .long v3_flush_user_tlb_range
- .long v3_flush_kern_tlb_range
- .long v3_tlb_flags
- .size v3_tlb_fns, . - v3_tlb_fns
+ /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
+ define_tlb_functions v3, v3_tlb_flags
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S
index d6c94457c2b9..17a025ade573 100644
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -57,9 +57,5 @@ ENTRY(v4_flush_user_tlb_range)
__INITDATA
- .type v4_tlb_fns, #object
-ENTRY(v4_tlb_fns)
- .long v4_flush_user_tlb_range
- .long v4_flush_kern_tlb_range
- .long v4_tlb_flags
- .size v4_tlb_fns, . - v4_tlb_fns
+ /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
+ define_tlb_functions v4, v4_tlb_flags
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S
index cb829ca7845d..c04598fa4d4a 100644
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -69,9 +69,5 @@ ENTRY(v4wb_flush_kern_tlb_range)
__INITDATA
- .type v4wb_tlb_fns, #object
-ENTRY(v4wb_tlb_fns)
- .long v4wb_flush_user_tlb_range
- .long v4wb_flush_kern_tlb_range
- .long v4wb_tlb_flags
- .size v4wb_tlb_fns, . - v4wb_tlb_fns
+ /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
+ define_tlb_functions v4wb, v4wb_tlb_flags
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S
index 60cfc4a25dd5..1f6062b6c1c1 100644
--- a/arch/arm/mm/tlb-v4wbi.S
+++ b/arch/arm/mm/tlb-v4wbi.S
@@ -60,9 +60,5 @@ ENTRY(v4wbi_flush_kern_tlb_range)
__INITDATA
- .type v4wbi_tlb_fns, #object
-ENTRY(v4wbi_tlb_fns)
- .long v4wbi_flush_user_tlb_range
- .long v4wbi_flush_kern_tlb_range
- .long v4wbi_tlb_flags
- .size v4wbi_tlb_fns, . - v4wbi_tlb_fns
+ /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
+ define_tlb_functions v4wbi, v4wbi_tlb_flags
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index 73d7d89b04c4..eca07f550a0b 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -54,7 +54,6 @@ ENTRY(v6wbi_flush_user_tlb_range)
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
mov pc, lr
@@ -83,16 +82,11 @@ ENTRY(v6wbi_flush_kern_tlb_range)
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
- mcr p15, 0, r2, c7, c5, 4 @ prefetch flush
+ mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
mov pc, lr
__INIT
- .type v6wbi_tlb_fns, #object
-ENTRY(v6wbi_tlb_fns)
- .long v6wbi_flush_user_tlb_range
- .long v6wbi_flush_kern_tlb_range
- .long v6wbi_tlb_flags
- .size v6wbi_tlb_fns, . - v6wbi_tlb_fns
+ /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
+ define_tlb_functions v6wbi, v6wbi_tlb_flags
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 53cd5b454673..845f461f8ec1 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -48,9 +48,6 @@ ENTRY(v7wbi_flush_user_tlb_range)
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- mov ip, #0
- ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
- ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -75,9 +72,6 @@ ENTRY(v7wbi_flush_kern_tlb_range)
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- mov r2, #0
- ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
- ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB
dsb
isb
mov pc, lr
@@ -85,10 +79,5 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
__INIT
- .type v7wbi_tlb_fns, #object
-ENTRY(v7wbi_tlb_fns)
- .long v7wbi_flush_user_tlb_range
- .long v7wbi_flush_kern_tlb_range
- ALT_SMP(.long v7wbi_tlb_flags_smp)
- ALT_UP(.long v7wbi_tlb_flags_up)
- .size v7wbi_tlb_fns, . - v7wbi_tlb_fns
+ /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
+ define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
diff --git a/arch/arm/plat-mxc/avic.c b/arch/arm/plat-mxc/avic.c
index 09e2bd0fcdca..55d2534ec727 100644
--- a/arch/arm/plat-mxc/avic.c
+++ b/arch/arm/plat-mxc/avic.c
@@ -46,6 +46,8 @@
#define AVIC_FIPNDH 0x60 /* fast int pending high */
#define AVIC_FIPNDL 0x64 /* fast int pending low */
+#define AVIC_NUM_IRQS 64
+
void __iomem *avic_base;
#ifdef CONFIG_MXC_IRQ_PRIOR
@@ -54,7 +56,7 @@ static int avic_irq_set_priority(unsigned char irq, unsigned char prio)
unsigned int temp;
unsigned int mask = 0x0F << irq % 8 * 4;
- if (irq >= MXC_INTERNAL_IRQS)
+ if (irq >= AVIC_NUM_IRQS)
		return -EINVAL;
temp = __raw_readl(avic_base + AVIC_NIPRIORITY(irq / 8));
@@ -72,14 +74,14 @@ static int avic_set_irq_fiq(unsigned int irq, unsigned int type)
{
unsigned int irqt;
- if (irq >= MXC_INTERNAL_IRQS)
+ if (irq >= AVIC_NUM_IRQS)
return -EINVAL;
- if (irq < MXC_INTERNAL_IRQS / 2) {
+ if (irq < AVIC_NUM_IRQS / 2) {
irqt = __raw_readl(avic_base + AVIC_INTTYPEL) & ~(1 << irq);
__raw_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEL);
} else {
- irq -= MXC_INTERNAL_IRQS / 2;
+ irq -= AVIC_NUM_IRQS / 2;
irqt = __raw_readl(avic_base + AVIC_INTTYPEH) & ~(1 << irq);
__raw_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEH);
}
@@ -138,7 +140,7 @@ void __init mxc_init_irq(void __iomem *irqbase)
/* all IRQ no FIQ */
__raw_writel(0, avic_base + AVIC_INTTYPEH);
__raw_writel(0, avic_base + AVIC_INTTYPEL);
- for (i = 0; i < MXC_INTERNAL_IRQS; i++) {
+ for (i = 0; i < AVIC_NUM_IRQS; i++) {
irq_set_chip_and_handler(i, &mxc_avic_chip.base,
handle_level_irq);
set_irq_flags(i, IRQF_VALID);
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c
index b130f60ca6b7..c64f015e031b 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-dma.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c
@@ -33,22 +33,22 @@ struct imx_imx_sdma_data {
#ifdef CONFIG_SOC_IMX25
struct imx_imx_sdma_data imx25_imx_sdma_data __initconst =
- imx_imx_sdma_data_entry_single(MX25, 1, "imx25", 0);
+ imx_imx_sdma_data_entry_single(MX25, 2, "imx25", 1);
#endif /* ifdef CONFIG_SOC_IMX25 */
#ifdef CONFIG_SOC_IMX31
struct imx_imx_sdma_data imx31_imx_sdma_data __initdata =
- imx_imx_sdma_data_entry_single(MX31, 1, "imx31", 0);
+ imx_imx_sdma_data_entry_single(MX31, 1, "imx31", 1);
#endif /* ifdef CONFIG_SOC_IMX31 */
#ifdef CONFIG_SOC_IMX35
struct imx_imx_sdma_data imx35_imx_sdma_data __initdata =
- imx_imx_sdma_data_entry_single(MX35, 2, "imx35", 0);
+ imx_imx_sdma_data_entry_single(MX35, 2, "imx35", 1);
#endif /* ifdef CONFIG_SOC_IMX35 */
#ifdef CONFIG_SOC_IMX51
struct imx_imx_sdma_data imx51_imx_sdma_data __initconst =
- imx_imx_sdma_data_entry_single(MX51, 2, "imx51", 0);
+ imx_imx_sdma_data_entry_single(MX51, 2, "imx51", 1);
#endif /* ifdef CONFIG_SOC_IMX51 */
static struct platform_device __init __maybe_unused *imx_add_imx_sdma(
@@ -57,7 +57,7 @@ static struct platform_device __init __maybe_unused *imx_add_imx_sdma(
struct resource res[] = {
{
.start = data->iobase,
- .end = data->iobase + SZ_4K - 1,
+ .end = data->iobase + SZ_16K - 1,
.flags = IORESOURCE_MEM,
}, {
.start = data->irq,
@@ -77,7 +77,7 @@ static struct platform_device __init __maybe_unused *imx_add_imx_dma(void)
}
#ifdef CONFIG_ARCH_MX25
-static struct sdma_script_start_addrs addr_imx25_to1 = {
+static struct sdma_script_start_addrs addr_imx25 = {
.ap_2_ap_addr = 729,
.uart_2_mcu_addr = 904,
.per_2_app_addr = 1255,
@@ -165,7 +165,7 @@ static int __init imxXX_add_imx_dma(void)
#if defined(CONFIG_SOC_IMX25)
if (cpu_is_mx25()) {
- imx25_imx_sdma_data.pdata.script_addrs = &addr_imx25_to1;
+ imx25_imx_sdma_data.pdata.script_addrs = &addr_imx25;
ret = imx_add_imx_sdma(&imx25_imx_sdma_data);
} else
#endif
diff --git a/arch/arm/plat-mxc/devices/platform-imx-ssi.c b/arch/arm/plat-mxc/devices/platform-imx-ssi.c
index 2569c8d8a2ef..66b8593e9b69 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-ssi.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-ssi.c
@@ -69,7 +69,7 @@ const struct imx_imx_ssi_data imx35_imx_ssi_data[] __initconst = {
#ifdef CONFIG_SOC_IMX51
const struct imx_imx_ssi_data imx51_imx_ssi_data[] __initconst = {
#define imx51_imx_ssi_data_entry(_id, _hwid) \
- imx_imx_ssi_data_entry(MX51, _id, _hwid, SZ_4K)
+ imx_imx_ssi_data_entry(MX51, _id, _hwid, SZ_16K)
imx51_imx_ssi_data_entry(0, 1),
imx51_imx_ssi_data_entry(1, 2),
imx51_imx_ssi_data_entry(2, 3),
diff --git a/arch/arm/plat-mxc/include/mach/debug-macro.S b/arch/arm/plat-mxc/include/mach/debug-macro.S
index 8e8d175e5077..91fc7cdb5dc9 100644
--- a/arch/arm/plat-mxc/include/mach/debug-macro.S
+++ b/arch/arm/plat-mxc/include/mach/debug-macro.S
@@ -12,32 +12,32 @@
*/
#include <mach/hardware.h>
-#ifdef CONFIG_ARCH_MX1
+#ifdef CONFIG_SOC_IMX1
#define UART_PADDR MX1_UART1_BASE_ADDR
#endif
-#ifdef CONFIG_ARCH_MX25
+#ifdef CONFIG_SOC_IMX25
#ifdef UART_PADDR
#error "CONFIG_DEBUG_LL is incompatible with multiple archs"
#endif
#define UART_PADDR MX25_UART1_BASE_ADDR
#endif
-#ifdef CONFIG_ARCH_MX2
+#if defined(CONFIG_SOC_IMX21) || defined (CONFIG_SOC_IMX27)
#ifdef UART_PADDR
#error "CONFIG_DEBUG_LL is incompatible with multiple archs"
#endif
#define UART_PADDR MX2x_UART1_BASE_ADDR
#endif
-#ifdef CONFIG_ARCH_MX3
+#if defined(CONFIG_SOC_IMX31) || defined(CONFIG_SOC_IMX35)
#ifdef UART_PADDR
#error "CONFIG_DEBUG_LL is incompatible with multiple archs"
#endif
#define UART_PADDR MX3x_UART1_BASE_ADDR
#endif
-#ifdef CONFIG_ARCH_MX5
+#ifdef CONFIG_SOC_IMX51
#ifdef UART_PADDR
#error "CONFIG_DEBUG_LL is incompatible with multiple archs"
#endif
diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S
index 2e49e71b1b98..066d464d322d 100644
--- a/arch/arm/plat-mxc/include/mach/entry-macro.S
+++ b/arch/arm/plat-mxc/include/mach/entry-macro.S
@@ -78,7 +78,3 @@
movs \irqnr, \irqnr
#endif
.endm
-
- @ irq priority table (not used)
- .macro irq_prio_table
- .endm
diff --git a/arch/arm/plat-mxc/include/mach/hardware.h b/arch/arm/plat-mxc/include/mach/hardware.h
index 67d3e2bed065..a8bfd565dcad 100644
--- a/arch/arm/plat-mxc/include/mach/hardware.h
+++ b/arch/arm/plat-mxc/include/mach/hardware.h
@@ -97,35 +97,17 @@
#include <mach/mxc.h>
-#ifdef CONFIG_ARCH_MX5
#include <mach/mx50.h>
#include <mach/mx51.h>
#include <mach/mx53.h>
-#endif
-
-#ifdef CONFIG_ARCH_MX3
#include <mach/mx3x.h>
#include <mach/mx31.h>
#include <mach/mx35.h>
-#endif
-
-#ifdef CONFIG_ARCH_MX2
-# include <mach/mx2x.h>
-# ifdef CONFIG_MACH_MX21
-# include <mach/mx21.h>
-# endif
-# ifdef CONFIG_MACH_MX27
-# include <mach/mx27.h>
-# endif
-#endif
-
-#ifdef CONFIG_ARCH_MX1
-# include <mach/mx1.h>
-#endif
-
-#ifdef CONFIG_ARCH_MX25
-# include <mach/mx25.h>
-#endif
+#include <mach/mx2x.h>
+#include <mach/mx21.h>
+#include <mach/mx27.h>
+#include <mach/mx1.h>
+#include <mach/mx25.h>
#define imx_map_entry(soc, name, _type) { \
.virtual = soc ## _IO_P2V(soc ## _ ## name ## _BASE_ADDR), \
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx25.h b/arch/arm/plat-mxc/include/mach/iomux-mx25.h
index 2e5244de7ff5..bf64e1e594ed 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx25.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx25.h
@@ -457,7 +457,7 @@
#define MX25_PAD_GPIO_A__USBOTG_PWR IOMUX_PAD(0x3f0, 0x1f4, 0x12, 0, 0, PAD_CTL_PKE)
#define MX25_PAD_GPIO_B__GPIO_B IOMUX_PAD(0x3f4, 0x1f8, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_GPIO_B__CAN1_RX IOMUX_PAD(0x3f4, 0x1f8, 0x16, 0x480, 1, PAD_CTL_PUS_22K)
+#define MX25_PAD_GPIO_B__CAN1_RX IOMUX_PAD(0x3f4, 0x1f8, 0x16, 0x480, 1, PAD_CTL_PUS_22K_UP)
#define MX25_PAD_GPIO_B__USBOTG_OC IOMUX_PAD(0x3f4, 0x1f8, 0x12, 0x57c, 1, PAD_CTL_PUS_100K_UP)
#define MX25_PAD_GPIO_C__GPIO_C IOMUX_PAD(0x3f8, 0x1fc, 0x10, 0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx53.h b/arch/arm/plat-mxc/include/mach/iomux-mx53.h
index e95d9cb8aeb7..9440b9e00e89 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx53.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx53.h
@@ -39,10 +39,10 @@
#define _MX53_PAD_GPIO_19__ECSPI1_RDY IOMUX_PAD(0x348, 0x20, 5, 0x0, 0, 0)
#define _MX53_PAD_GPIO_19__FEC_TDATA_3 IOMUX_PAD(0x348, 0x20, 6, 0x0, 0, 0)
#define _MX53_PAD_GPIO_19__SRC_INT_BOOT IOMUX_PAD(0x348, 0x20,7, 0x0, 0, 0)
-#define _MX53_PAD_KEY_COL0__KPP_COL_0 IOMUX_PAD(0x34C, 0x24, o, 0x0, 0, 0)
+#define _MX53_PAD_KEY_COL0__KPP_COL_0 IOMUX_PAD(0x34C, 0x24, 0, 0x0, 0, 0)
#define _MX53_PAD_KEY_COL0__GPIO4_6 IOMUX_PAD(0x34C, 0x24, 1, 0x0, 0, 0)
#define _MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC IOMUX_PAD(0x34C, 0x24, 2, 0x758, 0, 0)
-#define _MX53_PAD_KEY_COL0__UART4_TXD_MUX IOMUX_PAD(0x34C, 0x24, 4, 0x890, 0, 0)
+#define _MX53_PAD_KEY_COL0__UART4_TXD_MUX IOMUX_PAD(0x34C, 0x24, 4, 0x0, 0, 0)
#define _MX53_PAD_KEY_COL0__ECSPI1_SCLK IOMUX_PAD(0x34C, 0x24, 5, 0x79C, 0, 0)
#define _MX53_PAD_KEY_COL0__FEC_RDATA_3 IOMUX_PAD(0x34C, 0x24, 6, 0x0, 0, 0)
#define _MX53_PAD_KEY_COL0__SRC_ANY_PU_RST IOMUX_PAD(0x34C, 0x24, 7, 0x0, 0, 0)
@@ -55,7 +55,7 @@
#define _MX53_PAD_KEY_COL1__KPP_COL_1 IOMUX_PAD(0x354, 0x2C, 0, 0x0, 0, 0)
#define _MX53_PAD_KEY_COL1__GPIO4_8 IOMUX_PAD(0x354, 0x2C, 1, 0x0, 0, 0)
#define _MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS IOMUX_PAD(0x354, 0x2C, 2, 0x75C, 0, 0)
-#define _MX53_PAD_KEY_COL1__UART5_TXD_MUX IOMUX_PAD(0x354, 0x2C, 4, 0x898, 0, 0)
+#define _MX53_PAD_KEY_COL1__UART5_TXD_MUX IOMUX_PAD(0x354, 0x2C, 4, 0x0, 0, 0)
#define _MX53_PAD_KEY_COL1__ECSPI1_MISO IOMUX_PAD(0x354, 0x2C, 5, 0x7A0, 0, 0)
#define _MX53_PAD_KEY_COL1__FEC_RX_CLK IOMUX_PAD(0x354, 0x2C, 6, 0x808, 0, 0)
#define _MX53_PAD_KEY_COL1__USBPHY1_TXREADY IOMUX_PAD(0x354, 0x2C, 7, 0x0, 0, 0)
@@ -107,7 +107,7 @@
#define _MX53_PAD_KEY_ROW4__GPIO4_15 IOMUX_PAD(0x370, 0x48, 1, 0x0, 0, 0)
#define _MX53_PAD_KEY_ROW4__CAN2_RXCAN IOMUX_PAD(0x370, 0x48, 2, 0x764, 0, 0)
#define _MX53_PAD_KEY_ROW4__IPU_SISG_5 IOMUX_PAD(0x370, 0x48, 3, 0x0, 0, 0)
-#define _MX53_PAD_KEY_ROW4__UART5_CTS IOMUX_PAD(0x370, 0x48, 4, 0x894, 1, 0)
+#define _MX53_PAD_KEY_ROW4__UART5_CTS IOMUX_PAD(0x370, 0x48, 4, 0x0, 0, 0)
#define _MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR IOMUX_PAD(0x370, 0x48, 5, 0x0, 0, 0)
#define _MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID IOMUX_PAD(0x370, 0x48, 7, 0x0, 0, 0)
#define _MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK IOMUX_PAD(0x378, 0x4C, 0, 0x0, 0, 0)
@@ -377,7 +377,7 @@
#define _MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 IOMUX_PAD(0x410, 0xE4, 7, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 IOMUX_PAD(0x414, 0xE8, 0, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT10__GPIO5_28 IOMUX_PAD(0x414, 0xE8, 1, 0x0, 0, 0)
-#define _MX53_PAD_CSI0_DAT10__UART1_TXD_MUX IOMUX_PAD(0x414, 0xE8, 2, 0x878, 0, 0)
+#define _MX53_PAD_CSI0_DAT10__UART1_TXD_MUX IOMUX_PAD(0x414, 0xE8, 2, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT10__ECSPI2_MISO IOMUX_PAD(0x414, 0xE8, 3, 0x7BC, 1, 0)
#define _MX53_PAD_CSI0_DAT10__AUDMUX_AUD3_RXC IOMUX_PAD(0x414, 0xE8, 4, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT10__SDMA_DEBUG_PC_4 IOMUX_PAD(0x414, 0xE8, 5, 0x0, 0, 0)
@@ -393,7 +393,7 @@
#define _MX53_PAD_CSI0_DAT11__TPIU_TRACE_8 IOMUX_PAD(0x418, 0xEC, 7, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 IOMUX_PAD(0x41C, 0xF0, 0, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT12__GPIO5_30 IOMUX_PAD(0x41C, 0xF0, 1, 0x0, 0, 0)
-#define _MX53_PAD_CSI0_DAT12__UART4_TXD_MUX IOMUX_PAD(0x41C, 0xF0, 2, 0x890, 2, 0)
+#define _MX53_PAD_CSI0_DAT12__UART4_TXD_MUX IOMUX_PAD(0x41C, 0xF0, 2, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0 IOMUX_PAD(0x41C, 0xF0, 4, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 IOMUX_PAD(0x41C, 0xF0, 5, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41 IOMUX_PAD(0x41C, 0xF0, 6, 0x0, 0, 0)
@@ -407,7 +407,7 @@
#define _MX53_PAD_CSI0_DAT13__TPIU_TRACE_10 IOMUX_PAD(0x420, 0xF4, 7, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 IOMUX_PAD(0x424, 0xF8, 0, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT14__GPIO6_0 IOMUX_PAD(0x424, 0xF8, 1, 0x0, 0, 0)
-#define _MX53_PAD_CSI0_DAT14__UART5_TXD_MUX IOMUX_PAD(0x424, 0xF8, 2, 0x898, 2, 0)
+#define _MX53_PAD_CSI0_DAT14__UART5_TXD_MUX IOMUX_PAD(0x424, 0xF8, 2, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2 IOMUX_PAD(0x424, 0xF8, 4, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 IOMUX_PAD(0x424, 0xF8, 5, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43 IOMUX_PAD(0x424, 0xF8, 6, 0x0, 0, 0)
@@ -428,7 +428,7 @@
#define _MX53_PAD_CSI0_DAT16__TPIU_TRACE_13 IOMUX_PAD(0x42C, 0x100, 7, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 IOMUX_PAD(0x430, 0x104, 0, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT17__GPIO6_3 IOMUX_PAD(0x430, 0x104, 1, 0x0, 0, 0)
-#define _MX53_PAD_CSI0_DAT17__UART4_CTS IOMUX_PAD(0x430, 0x104, 2, 0x88C, 1, 0)
+#define _MX53_PAD_CSI0_DAT17__UART4_CTS IOMUX_PAD(0x430, 0x104, 2, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5 IOMUX_PAD(0x430, 0x104, 4, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 IOMUX_PAD(0x430, 0x104, 5, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46 IOMUX_PAD(0x430, 0x104, 6, 0x0, 0, 0)
@@ -442,7 +442,7 @@
#define _MX53_PAD_CSI0_DAT18__TPIU_TRACE_15 IOMUX_PAD(0x434, 0x108, 7, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 IOMUX_PAD(0x438, 0x10C, 0, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT19__GPIO6_5 IOMUX_PAD(0x438, 0x10C, 1, 0x0, 0, 0)
-#define _MX53_PAD_CSI0_DAT19__UART5_CTS IOMUX_PAD(0x438, 0x10C, 2, 0x894, 3, 0)
+#define _MX53_PAD_CSI0_DAT19__UART5_CTS IOMUX_PAD(0x438, 0x10C, 2, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7 IOMUX_PAD(0x438, 0x10C, 4, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 IOMUX_PAD(0x438, 0x10C, 5, 0x0, 0, 0)
#define _MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48 IOMUX_PAD(0x438, 0x10C, 6, 0x0, 0, 0)
@@ -465,19 +465,19 @@
#define _MX53_PAD_EIM_D16__IPU_DI0_PIN5 IOMUX_PAD(0x460, 0x118, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK IOMUX_PAD(0x460, 0x118, 3, 0x0, 0, 0)
#define _MX53_PAD_EIM_D16__ECSPI1_SCLK IOMUX_PAD(0x460, 0x118, 4, 0x79C, 3, 0)
-#define _MX53_PAD_EIM_D16__I2C2_SDA IOMUX_PAD(0x460, 0x118, 5, 0x820, 1, 0)
+#define _MX53_PAD_EIM_D16__I2C2_SDA IOMUX_PAD(0x460, 0x118, 5 | IOMUX_CONFIG_SION, 0x820, 1, 0)
#define _MX53_PAD_EIM_D17__EMI_WEIM_D_17 IOMUX_PAD(0x464, 0x11C, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D17__GPIO3_17 IOMUX_PAD(0x464, 0x11C, 1, 0x0, 0, 0)
#define _MX53_PAD_EIM_D17__IPU_DI0_PIN6 IOMUX_PAD(0x464, 0x11C, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN IOMUX_PAD(0x464, 0x11C, 3, 0x830, 0, 0)
#define _MX53_PAD_EIM_D17__ECSPI1_MISO IOMUX_PAD(0x464, 0x11C, 4, 0x7A0, 3, 0)
-#define _MX53_PAD_EIM_D17__I2C3_SCL IOMUX_PAD(0x464, 0x11C, 5, 0x824, 0, 0)
+#define _MX53_PAD_EIM_D17__I2C3_SCL IOMUX_PAD(0x464, 0x11C, 5 | IOMUX_CONFIG_SION, 0x824, 0, 0)
#define _MX53_PAD_EIM_D18__EMI_WEIM_D_18 IOMUX_PAD(0x468, 0x120, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D18__GPIO3_18 IOMUX_PAD(0x468, 0x120, 1, 0x0, 0, 0)
#define _MX53_PAD_EIM_D18__IPU_DI0_PIN7 IOMUX_PAD(0x468, 0x120, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO IOMUX_PAD(0x468, 0x120, 3, 0x830, 1, 0)
#define _MX53_PAD_EIM_D18__ECSPI1_MOSI IOMUX_PAD(0x468, 0x120, 4, 0x7A4, 3, 0)
-#define _MX53_PAD_EIM_D18__I2C3_SDA IOMUX_PAD(0x468, 0x120, 5, 0x828, 0, 0)
+#define _MX53_PAD_EIM_D18__I2C3_SDA IOMUX_PAD(0x468, 0x120, 5 | IOMUX_CONFIG_SION, 0x828, 0, 0)
#define _MX53_PAD_EIM_D18__IPU_DI1_D0_CS IOMUX_PAD(0x468, 0x120, 6, 0x0, 0, 0)
#define _MX53_PAD_EIM_D19__EMI_WEIM_D_19 IOMUX_PAD(0x46C, 0x124, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D19__GPIO3_19 IOMUX_PAD(0x46C, 0x124, 1, 0x0, 0, 0)
@@ -485,7 +485,7 @@
#define _MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS IOMUX_PAD(0x46C, 0x124, 3, 0x0, 0, 0)
#define _MX53_PAD_EIM_D19__ECSPI1_SS1 IOMUX_PAD(0x46C, 0x124, 4, 0x7AC, 2, 0)
#define _MX53_PAD_EIM_D19__EPIT1_EPITO IOMUX_PAD(0x46C, 0x124, 5, 0x0, 0, 0)
-#define _MX53_PAD_EIM_D19__UART1_CTS IOMUX_PAD(0x46C, 0x124, 6, 0x874, 0, 0)
+#define _MX53_PAD_EIM_D19__UART1_CTS IOMUX_PAD(0x46C, 0x124, 6, 0x0, 0, 0)
#define _MX53_PAD_EIM_D19__USBOH3_USBH2_OC IOMUX_PAD(0x46C, 0x124, 7, 0x8A4, 0, 0)
#define _MX53_PAD_EIM_D20__EMI_WEIM_D_20 IOMUX_PAD(0x470, 0x128, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D20__GPIO3_20 IOMUX_PAD(0x470, 0x128, 1, 0x0, 0, 0)
@@ -500,7 +500,7 @@
#define _MX53_PAD_EIM_D21__IPU_DI0_PIN17 IOMUX_PAD(0x474, 0x12C, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK IOMUX_PAD(0x474, 0x12C, 3, 0x0, 0, 0)
#define _MX53_PAD_EIM_D21__CSPI_SCLK IOMUX_PAD(0x474, 0x12C, 4, 0x780, 1, 0)
-#define _MX53_PAD_EIM_D21__I2C1_SCL IOMUX_PAD(0x474, 0x12C, 5, 0x814, 1, 0)
+#define _MX53_PAD_EIM_D21__I2C1_SCL IOMUX_PAD(0x474, 0x12C, 5 | IOMUX_CONFIG_SION, 0x814, 1, 0)
#define _MX53_PAD_EIM_D21__USBOH3_USBOTG_OC IOMUX_PAD(0x474, 0x12C, 6, 0x89C, 1, 0)
#define _MX53_PAD_EIM_D22__EMI_WEIM_D_22 IOMUX_PAD(0x478, 0x130, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D22__GPIO3_22 IOMUX_PAD(0x478, 0x130, 1, 0x0, 0, 0)
@@ -510,7 +510,7 @@
#define _MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR IOMUX_PAD(0x478, 0x130, 6, 0x0, 0, 0)
#define _MX53_PAD_EIM_D23__EMI_WEIM_D_23 IOMUX_PAD(0x47C, 0x134, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D23__GPIO3_23 IOMUX_PAD(0x47C, 0x134, 1, 0x0, 0, 0)
-#define _MX53_PAD_EIM_D23__UART3_CTS IOMUX_PAD(0x47C, 0x134, 2, 0x884, 0, 0)
+#define _MX53_PAD_EIM_D23__UART3_CTS IOMUX_PAD(0x47C, 0x134, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D23__UART1_DCD IOMUX_PAD(0x47C, 0x134, 3, 0x0, 0, 0)
#define _MX53_PAD_EIM_D23__IPU_DI0_D0_CS IOMUX_PAD(0x47C, 0x134, 4, 0x0, 0, 0)
#define _MX53_PAD_EIM_D23__IPU_DI1_PIN2 IOMUX_PAD(0x47C, 0x134, 5, 0x0, 0, 0)
@@ -525,7 +525,7 @@
#define _MX53_PAD_EIM_EB3__IPU_DI1_PIN16 IOMUX_PAD(0x480, 0x138, 7, 0x0, 0, 0)
#define _MX53_PAD_EIM_D24__EMI_WEIM_D_24 IOMUX_PAD(0x484, 0x13C, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D24__GPIO3_24 IOMUX_PAD(0x484, 0x13C, 1, 0x0, 0, 0)
-#define _MX53_PAD_EIM_D24__UART3_TXD_MUX IOMUX_PAD(0x484, 0x13C, 2, 0x888, 0, 0)
+#define _MX53_PAD_EIM_D24__UART3_TXD_MUX IOMUX_PAD(0x484, 0x13C, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D24__ECSPI1_SS2 IOMUX_PAD(0x484, 0x13C, 3, 0x7B0, 1, 0)
#define _MX53_PAD_EIM_D24__CSPI_SS2 IOMUX_PAD(0x484, 0x13C, 4, 0x794, 1, 0)
#define _MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS IOMUX_PAD(0x484, 0x13C, 5, 0x754, 1, 0)
@@ -541,7 +541,7 @@
#define _MX53_PAD_EIM_D25__UART1_DSR IOMUX_PAD(0x488, 0x140, 7, 0x0, 0, 0)
#define _MX53_PAD_EIM_D26__EMI_WEIM_D_26 IOMUX_PAD(0x48C, 0x144, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D26__GPIO3_26 IOMUX_PAD(0x48C, 0x144, 1, 0x0, 0, 0)
-#define _MX53_PAD_EIM_D26__UART2_TXD_MUX IOMUX_PAD(0x48C, 0x144, 2, 0x880, 0, 0)
+#define _MX53_PAD_EIM_D26__UART2_TXD_MUX IOMUX_PAD(0x48C, 0x144, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D26__FIRI_RXD IOMUX_PAD(0x48C, 0x144, 3, 0x80C, 0, 0)
#define _MX53_PAD_EIM_D26__IPU_CSI0_D_1 IOMUX_PAD(0x48C, 0x144, 4, 0x0, 0, 0)
#define _MX53_PAD_EIM_D26__IPU_DI1_PIN11 IOMUX_PAD(0x48C, 0x144, 5, 0x0, 0, 0)
@@ -557,10 +557,10 @@
#define _MX53_PAD_EIM_D27__IPU_DISP1_DAT_23 IOMUX_PAD(0x490, 0x148, 7, 0x0, 0, 0)
#define _MX53_PAD_EIM_D28__EMI_WEIM_D_28 IOMUX_PAD(0x494, 0x14C, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D28__GPIO3_28 IOMUX_PAD(0x494, 0x14C, 1, 0x0, 0, 0)
-#define _MX53_PAD_EIM_D28__UART2_CTS IOMUX_PAD(0x494, 0x14C, 2, 0x87C, 0, 0)
+#define _MX53_PAD_EIM_D28__UART2_CTS IOMUX_PAD(0x494, 0x14C, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO IOMUX_PAD(0x494, 0x14C, 3, 0x82C, 1, 0)
#define _MX53_PAD_EIM_D28__CSPI_MOSI IOMUX_PAD(0x494, 0x14C, 4, 0x788, 1, 0)
-#define _MX53_PAD_EIM_D28__I2C1_SDA IOMUX_PAD(0x494, 0x14C, 5, 0x818, 1, 0)
+#define _MX53_PAD_EIM_D28__I2C1_SDA IOMUX_PAD(0x494, 0x14C, 5 | IOMUX_CONFIG_SION, 0x818, 1, 0)
#define _MX53_PAD_EIM_D28__IPU_EXT_TRIG IOMUX_PAD(0x494, 0x14C, 6, 0x0, 0, 0)
#define _MX53_PAD_EIM_D28__IPU_DI0_PIN13 IOMUX_PAD(0x494, 0x14C, 7, 0x0, 0, 0)
#define _MX53_PAD_EIM_D29__EMI_WEIM_D_29 IOMUX_PAD(0x498, 0x150, 0, 0x0, 0, 0)
@@ -573,7 +573,7 @@
#define _MX53_PAD_EIM_D29__IPU_DI0_PIN14 IOMUX_PAD(0x498, 0x150, 7, 0x0, 0, 0)
#define _MX53_PAD_EIM_D30__EMI_WEIM_D_30 IOMUX_PAD(0x49C, 0x154, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_D30__GPIO3_30 IOMUX_PAD(0x49C, 0x154, 1, 0x0, 0, 0)
-#define _MX53_PAD_EIM_D30__UART3_CTS IOMUX_PAD(0x49C, 0x154, 2, 0x884, 2, 0)
+#define _MX53_PAD_EIM_D30__UART3_CTS IOMUX_PAD(0x49C, 0x154, 2, 0x0, 0, 0)
#define _MX53_PAD_EIM_D30__IPU_CSI0_D_3 IOMUX_PAD(0x49C, 0x154, 3, 0x0, 0, 0)
#define _MX53_PAD_EIM_D30__IPU_DI0_PIN11 IOMUX_PAD(0x49C, 0x154, 4, 0x0, 0, 0)
#define _MX53_PAD_EIM_D30__IPU_DISP1_DAT_21 IOMUX_PAD(0x49C, 0x154, 5, 0x0, 0, 0)
@@ -697,7 +697,7 @@
#define _MX53_PAD_EIM_DA5__GPIO3_5 IOMUX_PAD(0x500, 0x1B0, 1, 0x0, 0, 0)
#define _MX53_PAD_EIM_DA5__IPU_DISP1_DAT_4 IOMUX_PAD(0x500, 0x1B0, 3, 0x0, 0, 0)
#define _MX53_PAD_EIM_DA5__IPU_CSI1_D_4 IOMUX_PAD(0x500, 0x1B0, 4, 0x0, 0, 0)
-#define _MX53_PAD_EIM_DA5__SRC_BT_CFG3_6 IOMUX_PAD(0x500, 0x1B0, 17, 0x0, 0, 0)
+#define _MX53_PAD_EIM_DA5__SRC_BT_CFG3_6 IOMUX_PAD(0x500, 0x1B0, 7 | IOMUX_CONFIG_SION, 0x0, 0, 0)
#define _MX53_PAD_EIM_DA6__EMI_NAND_WEIM_DA_6 IOMUX_PAD(0x504, 0x1B4, 0, 0x0, 0, 0)
#define _MX53_PAD_EIM_DA6__GPIO3_6 IOMUX_PAD(0x504, 0x1B4, 1, 0x0, 0, 0)
#define _MX53_PAD_EIM_DA6__IPU_DISP1_DAT_3 IOMUX_PAD(0x504, 0x1B4, 3, 0x0, 0, 0)
@@ -859,7 +859,7 @@
#define _MX53_PAD_FEC_MDC__USBPHY2_DATAOUT_1 IOMUX_PAD(0x5E8, 0x26C, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_DIOW__PATA_DIOW IOMUX_PAD(0x5F0, 0x270, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_DIOW__GPIO6_17 IOMUX_PAD(0x5F0, 0x270, 1, 0x0, 0, 0)
-#define _MX53_PAD_PATA_DIOW__UART1_TXD_MUX IOMUX_PAD(0x5F0, 0x270, 3, 0x878, 2, 0)
+#define _MX53_PAD_PATA_DIOW__UART1_TXD_MUX IOMUX_PAD(0x5F0, 0x270, 3, 0x0, 0, 0)
#define _MX53_PAD_PATA_DIOW__USBPHY2_DATAOUT_2 IOMUX_PAD(0x5F0, 0x270, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_DMACK__PATA_DMACK IOMUX_PAD(0x5F4, 0x274, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_DMACK__GPIO6_18 IOMUX_PAD(0x5F4, 0x274, 1, 0x0, 0, 0)
@@ -867,7 +867,7 @@
#define _MX53_PAD_PATA_DMACK__USBPHY2_DATAOUT_3 IOMUX_PAD(0x5F4, 0x274, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_DMARQ__PATA_DMARQ IOMUX_PAD(0x5F8, 0x278, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_DMARQ__GPIO7_0 IOMUX_PAD(0x5F8, 0x278, 1, 0x0, 0, 0)
-#define _MX53_PAD_PATA_DMARQ__UART2_TXD_MUX IOMUX_PAD(0x5F8, 0x278, 3, 0x880, 2, 0)
+#define _MX53_PAD_PATA_DMARQ__UART2_TXD_MUX IOMUX_PAD(0x5F8, 0x278, 3, 0x0, 0, 0)
#define _MX53_PAD_PATA_DMARQ__CCM_CCM_OUT_0 IOMUX_PAD(0x5F8, 0x278, 5, 0x0, 0, 0)
#define _MX53_PAD_PATA_DMARQ__USBPHY2_DATAOUT_4 IOMUX_PAD(0x5F8, 0x278, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_BUFFER_EN__PATA_BUFFER_EN IOMUX_PAD(0x5FC, 0x27C, 0, 0x0, 0, 0)
@@ -877,7 +877,7 @@
#define _MX53_PAD_PATA_BUFFER_EN__USBPHY2_DATAOUT_5 IOMUX_PAD(0x5FC, 0x27C, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_INTRQ__PATA_INTRQ IOMUX_PAD(0x600, 0x280, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_INTRQ__GPIO7_2 IOMUX_PAD(0x600, 0x280, 1, 0x0, 0, 0)
-#define _MX53_PAD_PATA_INTRQ__UART2_CTS IOMUX_PAD(0x600, 0x280, 3, 0x87C, 2, 0)
+#define _MX53_PAD_PATA_INTRQ__UART2_CTS IOMUX_PAD(0x600, 0x280, 3, 0x0, 0, 0)
#define _MX53_PAD_PATA_INTRQ__CAN1_TXCAN IOMUX_PAD(0x600, 0x280, 4, 0x0, 0, 0)
#define _MX53_PAD_PATA_INTRQ__CCM_CCM_OUT_2 IOMUX_PAD(0x600, 0x280, 5, 0x0, 0, 0)
#define _MX53_PAD_PATA_INTRQ__USBPHY2_DATAOUT_6 IOMUX_PAD(0x600, 0x280, 7, 0x0, 0, 0)
@@ -889,7 +889,7 @@
#define _MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B IOMUX_PAD(0x608, 0x288, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_RESET_B__GPIO7_4 IOMUX_PAD(0x608, 0x288, 1, 0x0, 0, 0)
#define _MX53_PAD_PATA_RESET_B__ESDHC3_CMD IOMUX_PAD(0x608, 0x288, 2, 0x0, 0, 0)
-#define _MX53_PAD_PATA_RESET_B__UART1_CTS IOMUX_PAD(0x608, 0x288, 3, 0x874, 2, 0)
+#define _MX53_PAD_PATA_RESET_B__UART1_CTS IOMUX_PAD(0x608, 0x288, 3, 0x0, 0, 0)
#define _MX53_PAD_PATA_RESET_B__CAN2_TXCAN IOMUX_PAD(0x608, 0x288, 4, 0x0, 0, 0)
#define _MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0 IOMUX_PAD(0x608, 0x288, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_IORDY__PATA_IORDY IOMUX_PAD(0x60C, 0x28C, 0, 0x0, 0, 0)
@@ -906,7 +906,7 @@
#define _MX53_PAD_PATA_DA_1__PATA_DA_1 IOMUX_PAD(0x614, 0x294, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_DA_1__GPIO7_7 IOMUX_PAD(0x614, 0x294, 1, 0x0, 0, 0)
#define _MX53_PAD_PATA_DA_1__ESDHC4_CMD IOMUX_PAD(0x614, 0x294, 2, 0x0, 0, 0)
-#define _MX53_PAD_PATA_DA_1__UART3_CTS IOMUX_PAD(0x614, 0x294, 4, 0x884, 4, 0)
+#define _MX53_PAD_PATA_DA_1__UART3_CTS IOMUX_PAD(0x614, 0x294, 4, 0x0, 0, 0)
#define _MX53_PAD_PATA_DA_1__USBPHY1_DATAOUT_3 IOMUX_PAD(0x614, 0x294, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_DA_2__PATA_DA_2 IOMUX_PAD(0x618, 0x298, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_DA_2__GPIO7_8 IOMUX_PAD(0x618, 0x298, 1, 0x0, 0, 0)
@@ -915,7 +915,7 @@
#define _MX53_PAD_PATA_DA_2__USBPHY1_DATAOUT_4 IOMUX_PAD(0x618, 0x298, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_CS_0__PATA_CS_0 IOMUX_PAD(0x61C, 0x29C, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_CS_0__GPIO7_9 IOMUX_PAD(0x61C, 0x29C, 1, 0x0, 0, 0)
-#define _MX53_PAD_PATA_CS_0__UART3_TXD_MUX IOMUX_PAD(0x61C, 0x29C, 4, 0x888, 2, 0)
+#define _MX53_PAD_PATA_CS_0__UART3_TXD_MUX IOMUX_PAD(0x61C, 0x29C, 4, 0x0, 0, 0)
#define _MX53_PAD_PATA_CS_0__USBPHY1_DATAOUT_5 IOMUX_PAD(0x61C, 0x29C, 7, 0x0, 0, 0)
#define _MX53_PAD_PATA_CS_1__PATA_CS_1 IOMUX_PAD(0x620, 0x2A0, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_CS_1__GPIO7_10 IOMUX_PAD(0x620, 0x2A0, 1, 0x0, 0, 0)
@@ -958,12 +958,12 @@
#define _MX53_PAD_PATA_DATA5__ESDHC4_DAT5 IOMUX_PAD(0x63C, 0x2B8, 4, 0x0, 0, 0)
#define _MX53_PAD_PATA_DATA5__GPU3d_GPU_DEBUG_OUT_5 IOMUX_PAD(0x63C, 0x2B8, 5, 0x0, 0, 0)
#define _MX53_PAD_PATA_DATA5__IPU_DIAG_BUS_5 IOMUX_PAD(0x63C, 0x2B8, 6, 0x0, 0, 0)
-#define _MX53_PAD_PATA_DATA6__PATA_DATA_6 IOMUX_PAD(0x640, 0x2BC, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__PATA_DATA_6 IOMUX_PAD(0x640, 0x2BC, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_DATA6__GPIO2_6 IOMUX_PAD(0x640, 0x2BC, 1, 0x0, 0, 0)
-#define _MX53_PAD_PATA_DATA6__EMI_NANDF_D_6 IOMUX_PAD(0x640, 0x2BC, 1, 0x0, 0, 0)
-#define _MX53_PAD_PATA_DATA6__ESDHC4_DAT6 IOMUX_PAD(0x640, 0x2BC, 1, 0x0, 0, 0)
-#define _MX53_PAD_PATA_DATA6__GPU3d_GPU_DEBUG_OUT_6 IOMUX_PAD(0x640, 0x2BC, 1, 0x0, 0, 0)
-#define _MX53_PAD_PATA_DATA6__IPU_DIAG_BUS_6 IOMUX_PAD(0x640, 0x2BC, 1, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__EMI_NANDF_D_6 IOMUX_PAD(0x640, 0x2BC, 3, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__ESDHC4_DAT6 IOMUX_PAD(0x640, 0x2BC, 4, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__GPU3d_GPU_DEBUG_OUT_6 IOMUX_PAD(0x640, 0x2BC, 5, 0x0, 0, 0)
+#define _MX53_PAD_PATA_DATA6__IPU_DIAG_BUS_6 IOMUX_PAD(0x640, 0x2BC, 6, 0x0, 0, 0)
#define _MX53_PAD_PATA_DATA7__PATA_DATA_7 IOMUX_PAD(0x644, 0x2C0, 0, 0x0, 0, 0)
#define _MX53_PAD_PATA_DATA7__GPIO2_7 IOMUX_PAD(0x644, 0x2C0, 1, 0x0, 0, 0)
#define _MX53_PAD_PATA_DATA7__EMI_NANDF_D_7 IOMUX_PAD(0x644, 0x2C0, 3, 0x0, 0, 0)
@@ -1161,13 +1161,13 @@
#define _MX53_PAD_GPIO_5__CCM_CLKO IOMUX_PAD(0x6C0, 0x330, 3, 0x0, 0, 0)
#define _MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 IOMUX_PAD(0x6C0, 0x330, 4, 0x0, 0, 0)
#define _MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 IOMUX_PAD(0x6C0, 0x330, 5, 0x0, 0, 0)
-#define _MX53_PAD_GPIO_5__I2C3_SCL IOMUX_PAD(0x6C0, 0x330, 6, 0x824, 2, 0)
+#define _MX53_PAD_GPIO_5__I2C3_SCL IOMUX_PAD(0x6C0, 0x330, 6 | IOMUX_CONFIG_SION, 0x824, 2, 0)
#define _MX53_PAD_GPIO_5__CCM_PLL1_BYP IOMUX_PAD(0x6C0, 0x330, 7, 0x770, 1, 0)
#define _MX53_PAD_GPIO_7__ESAI1_TX4_RX1 IOMUX_PAD(0x6C4, 0x334, 0, 0x7F4, 1, 0)
#define _MX53_PAD_GPIO_7__GPIO1_7 IOMUX_PAD(0x6C4, 0x334, 1, 0x0, 0, 0)
#define _MX53_PAD_GPIO_7__EPIT1_EPITO IOMUX_PAD(0x6C4, 0x334, 2, 0x0, 0, 0)
#define _MX53_PAD_GPIO_7__CAN1_TXCAN IOMUX_PAD(0x6C4, 0x334, 3, 0x0, 0, 0)
-#define _MX53_PAD_GPIO_7__UART2_TXD_MUX IOMUX_PAD(0x6C4, 0x334, 4, 0x880, 4, 0)
+#define _MX53_PAD_GPIO_7__UART2_TXD_MUX IOMUX_PAD(0x6C4, 0x334, 4, 0x0, 0, 0)
#define _MX53_PAD_GPIO_7__FIRI_RXD IOMUX_PAD(0x6C4, 0x334, 5, 0x80C, 1, 0)
#define _MX53_PAD_GPIO_7__SPDIF_PLOCK IOMUX_PAD(0x6C4, 0x334, 6, 0x0, 0, 0)
#define _MX53_PAD_GPIO_7__CCM_PLL2_BYP IOMUX_PAD(0x6C4, 0x334, 7, 0x774, 1, 0)
@@ -1214,27 +1214,27 @@
#define MX53_PAD_KEY_COL0__KPP_COL_0 (_MX53_PAD_KEY_COL0__KPP_COL_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL0__GPIO4_6 (_MX53_PAD_KEY_COL0__GPIO4_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC (_MX53_PAD_KEY_COL0__AUDMUX_AUD5_TXC | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_KEY_COL0__UART4_TXD_MUX (_MX53_PAD_KEY_COL0__UART4_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL0__UART4_TXD_MUX (_MX53_PAD_KEY_COL0__UART4_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_KEY_COL0__ECSPI1_SCLK (_MX53_PAD_KEY_COL0__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL0__FEC_RDATA_3 (_MX53_PAD_KEY_COL0__FEC_RDATA_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL0__SRC_ANY_PU_RST (_MX53_PAD_KEY_COL0__SRC_ANY_PU_RST | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW0__KPP_ROW_0 (_MX53_PAD_KEY_ROW0__KPP_ROW_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW0__GPIO4_7 (_MX53_PAD_KEY_ROW0__GPIO4_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD (_MX53_PAD_KEY_ROW0__AUDMUX_AUD5_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_KEY_ROW0__UART4_RXD_MUX (_MX53_PAD_KEY_ROW0__UART4_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW0__UART4_RXD_MUX (_MX53_PAD_KEY_ROW0__UART4_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_KEY_ROW0__ECSPI1_MOSI (_MX53_PAD_KEY_ROW0__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW0__FEC_TX_ER (_MX53_PAD_KEY_ROW0__FEC_TX_ER | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL1__KPP_COL_1 (_MX53_PAD_KEY_COL1__KPP_COL_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL1__GPIO4_8 (_MX53_PAD_KEY_COL1__GPIO4_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS (_MX53_PAD_KEY_COL1__AUDMUX_AUD5_TXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_KEY_COL1__UART5_TXD_MUX (_MX53_PAD_KEY_COL1__UART5_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL1__UART5_TXD_MUX (_MX53_PAD_KEY_COL1__UART5_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_KEY_COL1__ECSPI1_MISO (_MX53_PAD_KEY_COL1__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL1__FEC_RX_CLK (_MX53_PAD_KEY_COL1__FEC_RX_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL1__USBPHY1_TXREADY (_MX53_PAD_KEY_COL1__USBPHY1_TXREADY | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW1__KPP_ROW_1 (_MX53_PAD_KEY_ROW1__KPP_ROW_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW1__GPIO4_9 (_MX53_PAD_KEY_ROW1__GPIO4_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD (_MX53_PAD_KEY_ROW1__AUDMUX_AUD5_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_KEY_ROW1__UART5_RXD_MUX (_MX53_PAD_KEY_ROW1__UART5_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW1__UART5_RXD_MUX (_MX53_PAD_KEY_ROW1__UART5_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_KEY_ROW1__ECSPI1_SS0 (_MX53_PAD_KEY_ROW1__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW1__FEC_COL (_MX53_PAD_KEY_ROW1__FEC_COL | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW1__USBPHY1_RXVALID (_MX53_PAD_KEY_ROW1__USBPHY1_RXVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1272,14 +1272,14 @@
#define MX53_PAD_KEY_COL4__GPIO4_14 (_MX53_PAD_KEY_COL4__GPIO4_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL4__CAN2_TXCAN (_MX53_PAD_KEY_COL4__CAN2_TXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL4__IPU_SISG_4 (_MX53_PAD_KEY_COL4__IPU_SISG_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_KEY_COL4__UART5_RTS (_MX53_PAD_KEY_COL4__UART5_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_COL4__UART5_RTS (_MX53_PAD_KEY_COL4__UART5_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_KEY_COL4__USBOH3_USBOTG_OC (_MX53_PAD_KEY_COL4__USBOH3_USBOTG_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_COL4__USBPHY1_LINESTATE_1 (_MX53_PAD_KEY_COL4__USBPHY1_LINESTATE_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW4__KPP_ROW_4 (_MX53_PAD_KEY_ROW4__KPP_ROW_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW4__GPIO4_15 (_MX53_PAD_KEY_ROW4__GPIO4_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW4__CAN2_RXCAN (_MX53_PAD_KEY_ROW4__CAN2_RXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW4__IPU_SISG_5 (_MX53_PAD_KEY_ROW4__IPU_SISG_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_KEY_ROW4__UART5_CTS (_MX53_PAD_KEY_ROW4__UART5_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_KEY_ROW4__UART5_CTS (_MX53_PAD_KEY_ROW4__UART5_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR (_MX53_PAD_KEY_ROW4__USBOH3_USBOTG_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID (_MX53_PAD_KEY_ROW4__USBPHY1_VBUSVALID | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK (_MX53_PAD_DI0_DISP_CLK__IPU_DI0_DISP_CLK | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1565,56 +1565,56 @@
#define MX53_PAD_CSI0_DAT11__TPIU_TRACE_8 (_MX53_PAD_CSI0_DAT11__TPIU_TRACE_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 (_MX53_PAD_CSI0_DAT12__IPU_CSI0_D_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT12__GPIO5_30 (_MX53_PAD_CSI0_DAT12__GPIO5_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_CSI0_DAT12__UART4_TXD_MUX (_MX53_PAD_CSI0_DAT12__UART4_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT12__UART4_TXD_MUX (_MX53_PAD_CSI0_DAT12__UART4_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0 (_MX53_PAD_CSI0_DAT12__USBOH3_USBH3_DATA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 (_MX53_PAD_CSI0_DAT12__SDMA_DEBUG_PC_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41 (_MX53_PAD_CSI0_DAT12__EMI_EMI_DEBUG_41 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT12__TPIU_TRACE_9 (_MX53_PAD_CSI0_DAT12__TPIU_TRACE_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 (_MX53_PAD_CSI0_DAT13__IPU_CSI0_D_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT13__GPIO5_31 (_MX53_PAD_CSI0_DAT13__GPIO5_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_CSI0_DAT13__UART4_RXD_MUX (_MX53_PAD_CSI0_DAT13__UART4_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT13__UART4_RXD_MUX (_MX53_PAD_CSI0_DAT13__UART4_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_CSI0_DAT13__USBOH3_USBH3_DATA_1 (_MX53_PAD_CSI0_DAT13__USBOH3_USBH3_DATA_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 (_MX53_PAD_CSI0_DAT13__SDMA_DEBUG_PC_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT13__EMI_EMI_DEBUG_42 (_MX53_PAD_CSI0_DAT13__EMI_EMI_DEBUG_42 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT13__TPIU_TRACE_10 (_MX53_PAD_CSI0_DAT13__TPIU_TRACE_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 (_MX53_PAD_CSI0_DAT14__IPU_CSI0_D_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT14__GPIO6_0 (_MX53_PAD_CSI0_DAT14__GPIO6_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_CSI0_DAT14__UART5_TXD_MUX (_MX53_PAD_CSI0_DAT14__UART5_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT14__UART5_TXD_MUX (_MX53_PAD_CSI0_DAT14__UART5_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2 (_MX53_PAD_CSI0_DAT14__USBOH3_USBH3_DATA_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 (_MX53_PAD_CSI0_DAT14__SDMA_DEBUG_PC_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43 (_MX53_PAD_CSI0_DAT14__EMI_EMI_DEBUG_43 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT14__TPIU_TRACE_11 (_MX53_PAD_CSI0_DAT14__TPIU_TRACE_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 (_MX53_PAD_CSI0_DAT15__IPU_CSI0_D_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT15__GPIO6_1 (_MX53_PAD_CSI0_DAT15__GPIO6_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_CSI0_DAT15__UART5_RXD_MUX (_MX53_PAD_CSI0_DAT15__UART5_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT15__UART5_RXD_MUX (_MX53_PAD_CSI0_DAT15__UART5_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_CSI0_DAT15__USBOH3_USBH3_DATA_3 (_MX53_PAD_CSI0_DAT15__USBOH3_USBH3_DATA_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 (_MX53_PAD_CSI0_DAT15__SDMA_DEBUG_PC_9 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT15__EMI_EMI_DEBUG_44 (_MX53_PAD_CSI0_DAT15__EMI_EMI_DEBUG_44 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT15__TPIU_TRACE_12 (_MX53_PAD_CSI0_DAT15__TPIU_TRACE_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 (_MX53_PAD_CSI0_DAT16__IPU_CSI0_D_16 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT16__GPIO6_2 (_MX53_PAD_CSI0_DAT16__GPIO6_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_CSI0_DAT16__UART4_RTS (_MX53_PAD_CSI0_DAT16__UART4_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT16__UART4_RTS (_MX53_PAD_CSI0_DAT16__UART4_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_CSI0_DAT16__USBOH3_USBH3_DATA_4 (_MX53_PAD_CSI0_DAT16__USBOH3_USBH3_DATA_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 (_MX53_PAD_CSI0_DAT16__SDMA_DEBUG_PC_10 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT16__EMI_EMI_DEBUG_45 (_MX53_PAD_CSI0_DAT16__EMI_EMI_DEBUG_45 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT16__TPIU_TRACE_13 (_MX53_PAD_CSI0_DAT16__TPIU_TRACE_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 (_MX53_PAD_CSI0_DAT17__IPU_CSI0_D_17 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT17__GPIO6_3 (_MX53_PAD_CSI0_DAT17__GPIO6_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_CSI0_DAT17__UART4_CTS (_MX53_PAD_CSI0_DAT17__UART4_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT17__UART4_CTS (_MX53_PAD_CSI0_DAT17__UART4_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5 (_MX53_PAD_CSI0_DAT17__USBOH3_USBH3_DATA_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 (_MX53_PAD_CSI0_DAT17__SDMA_DEBUG_PC_11 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46 (_MX53_PAD_CSI0_DAT17__EMI_EMI_DEBUG_46 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT17__TPIU_TRACE_14 (_MX53_PAD_CSI0_DAT17__TPIU_TRACE_14 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 (_MX53_PAD_CSI0_DAT18__IPU_CSI0_D_18 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT18__GPIO6_4 (_MX53_PAD_CSI0_DAT18__GPIO6_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_CSI0_DAT18__UART5_RTS (_MX53_PAD_CSI0_DAT18__UART5_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT18__UART5_RTS (_MX53_PAD_CSI0_DAT18__UART5_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_CSI0_DAT18__USBOH3_USBH3_DATA_6 (_MX53_PAD_CSI0_DAT18__USBOH3_USBH3_DATA_6 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 (_MX53_PAD_CSI0_DAT18__SDMA_DEBUG_PC_12 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT18__EMI_EMI_DEBUG_47 (_MX53_PAD_CSI0_DAT18__EMI_EMI_DEBUG_47 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT18__TPIU_TRACE_15 (_MX53_PAD_CSI0_DAT18__TPIU_TRACE_15 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 (_MX53_PAD_CSI0_DAT19__IPU_CSI0_D_19 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT19__GPIO6_5 (_MX53_PAD_CSI0_DAT19__GPIO6_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_CSI0_DAT19__UART5_CTS (_MX53_PAD_CSI0_DAT19__UART5_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_CSI0_DAT19__UART5_CTS (_MX53_PAD_CSI0_DAT19__UART5_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7 (_MX53_PAD_CSI0_DAT19__USBOH3_USBH3_DATA_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 (_MX53_PAD_CSI0_DAT19__SDMA_DEBUG_PC_13 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48 (_MX53_PAD_CSI0_DAT19__EMI_EMI_DEBUG_48 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1657,7 +1657,7 @@
#define MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS (_MX53_PAD_EIM_D19__IPU_DISPB1_SER_RS | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D19__ECSPI1_SS1 (_MX53_PAD_EIM_D19__ECSPI1_SS1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D19__EPIT1_EPITO (_MX53_PAD_EIM_D19__EPIT1_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D19__UART1_CTS (_MX53_PAD_EIM_D19__UART1_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D19__UART1_CTS (_MX53_PAD_EIM_D19__UART1_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D19__USBOH3_USBH2_OC (_MX53_PAD_EIM_D19__USBOH3_USBH2_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D20__EMI_WEIM_D_20 (_MX53_PAD_EIM_D20__EMI_WEIM_D_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D20__GPIO3_20 (_MX53_PAD_EIM_D20__GPIO3_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1665,7 +1665,7 @@
#define MX53_PAD_EIM_D20__IPU_SER_DISP0_CS (_MX53_PAD_EIM_D20__IPU_SER_DISP0_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D20__CSPI_SS0 (_MX53_PAD_EIM_D20__CSPI_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D20__EPIT2_EPITO (_MX53_PAD_EIM_D20__EPIT2_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D20__UART1_RTS (_MX53_PAD_EIM_D20__UART1_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D20__UART1_RTS (_MX53_PAD_EIM_D20__UART1_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D20__USBOH3_USBH2_PWR (_MX53_PAD_EIM_D20__USBOH3_USBH2_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D21__EMI_WEIM_D_21 (_MX53_PAD_EIM_D21__EMI_WEIM_D_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D21__GPIO3_21 (_MX53_PAD_EIM_D21__GPIO3_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1682,7 +1682,7 @@
#define MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR (_MX53_PAD_EIM_D22__USBOH3_USBOTG_PWR | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D23__EMI_WEIM_D_23 (_MX53_PAD_EIM_D23__EMI_WEIM_D_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D23__GPIO3_23 (_MX53_PAD_EIM_D23__GPIO3_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D23__UART3_CTS (_MX53_PAD_EIM_D23__UART3_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D23__UART3_CTS (_MX53_PAD_EIM_D23__UART3_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D23__UART1_DCD (_MX53_PAD_EIM_D23__UART1_DCD | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D23__IPU_DI0_D0_CS (_MX53_PAD_EIM_D23__IPU_DI0_D0_CS | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D23__IPU_DI1_PIN2 (_MX53_PAD_EIM_D23__IPU_DI1_PIN2 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1690,14 +1690,14 @@
#define MX53_PAD_EIM_D23__IPU_DI1_PIN14 (_MX53_PAD_EIM_D23__IPU_DI1_PIN14 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB3__EMI_WEIM_EB_3 (_MX53_PAD_EIM_EB3__EMI_WEIM_EB_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB3__GPIO2_31 (_MX53_PAD_EIM_EB3__GPIO2_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_EB3__UART3_RTS (_MX53_PAD_EIM_EB3__UART3_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_EB3__UART3_RTS (_MX53_PAD_EIM_EB3__UART3_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_EB3__UART1_RI (_MX53_PAD_EIM_EB3__UART1_RI | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB3__IPU_DI1_PIN3 (_MX53_PAD_EIM_EB3__IPU_DI1_PIN3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB3__IPU_CSI1_HSYNC (_MX53_PAD_EIM_EB3__IPU_CSI1_HSYNC | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_EB3__IPU_DI1_PIN16 (_MX53_PAD_EIM_EB3__IPU_DI1_PIN16 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D24__EMI_WEIM_D_24 (_MX53_PAD_EIM_D24__EMI_WEIM_D_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D24__GPIO3_24 (_MX53_PAD_EIM_D24__GPIO3_24 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D24__UART3_TXD_MUX (_MX53_PAD_EIM_D24__UART3_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D24__UART3_TXD_MUX (_MX53_PAD_EIM_D24__UART3_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D24__ECSPI1_SS2 (_MX53_PAD_EIM_D24__ECSPI1_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D24__CSPI_SS2 (_MX53_PAD_EIM_D24__CSPI_SS2 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS (_MX53_PAD_EIM_D24__AUDMUX_AUD5_RXFS | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1705,7 +1705,7 @@
#define MX53_PAD_EIM_D24__UART1_DTR (_MX53_PAD_EIM_D24__UART1_DTR | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D25__EMI_WEIM_D_25 (_MX53_PAD_EIM_D25__EMI_WEIM_D_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D25__GPIO3_25 (_MX53_PAD_EIM_D25__GPIO3_25 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D25__UART3_RXD_MUX (_MX53_PAD_EIM_D25__UART3_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D25__UART3_RXD_MUX (_MX53_PAD_EIM_D25__UART3_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D25__ECSPI1_SS3 (_MX53_PAD_EIM_D25__ECSPI1_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D25__CSPI_SS3 (_MX53_PAD_EIM_D25__CSPI_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D25__AUDMUX_AUD5_RXC (_MX53_PAD_EIM_D25__AUDMUX_AUD5_RXC | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1713,7 +1713,7 @@
#define MX53_PAD_EIM_D25__UART1_DSR (_MX53_PAD_EIM_D25__UART1_DSR | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D26__EMI_WEIM_D_26 (_MX53_PAD_EIM_D26__EMI_WEIM_D_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D26__GPIO3_26 (_MX53_PAD_EIM_D26__GPIO3_26 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D26__UART2_TXD_MUX (_MX53_PAD_EIM_D26__UART2_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D26__UART2_TXD_MUX (_MX53_PAD_EIM_D26__UART2_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D26__FIRI_RXD (_MX53_PAD_EIM_D26__FIRI_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D26__IPU_CSI0_D_1 (_MX53_PAD_EIM_D26__IPU_CSI0_D_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D26__IPU_DI1_PIN11 (_MX53_PAD_EIM_D26__IPU_DI1_PIN11 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1721,7 +1721,7 @@
#define MX53_PAD_EIM_D26__IPU_DISP1_DAT_22 (_MX53_PAD_EIM_D26__IPU_DISP1_DAT_22 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D27__EMI_WEIM_D_27 (_MX53_PAD_EIM_D27__EMI_WEIM_D_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D27__GPIO3_27 (_MX53_PAD_EIM_D27__GPIO3_27 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D27__UART2_RXD_MUX (_MX53_PAD_EIM_D27__UART2_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D27__UART2_RXD_MUX (_MX53_PAD_EIM_D27__UART2_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D27__FIRI_TXD (_MX53_PAD_EIM_D27__FIRI_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D27__IPU_CSI0_D_0 (_MX53_PAD_EIM_D27__IPU_CSI0_D_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D27__IPU_DI1_PIN13 (_MX53_PAD_EIM_D27__IPU_DI1_PIN13 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1729,7 +1729,7 @@
#define MX53_PAD_EIM_D27__IPU_DISP1_DAT_23 (_MX53_PAD_EIM_D27__IPU_DISP1_DAT_23 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D28__EMI_WEIM_D_28 (_MX53_PAD_EIM_D28__EMI_WEIM_D_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D28__GPIO3_28 (_MX53_PAD_EIM_D28__GPIO3_28 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D28__UART2_CTS (_MX53_PAD_EIM_D28__UART2_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D28__UART2_CTS (_MX53_PAD_EIM_D28__UART2_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO (_MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D28__CSPI_MOSI (_MX53_PAD_EIM_D28__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D28__I2C1_SDA (_MX53_PAD_EIM_D28__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1737,7 +1737,7 @@
#define MX53_PAD_EIM_D28__IPU_DI0_PIN13 (_MX53_PAD_EIM_D28__IPU_DI0_PIN13 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D29__EMI_WEIM_D_29 (_MX53_PAD_EIM_D29__EMI_WEIM_D_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D29__GPIO3_29 (_MX53_PAD_EIM_D29__GPIO3_29 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D29__UART2_RTS (_MX53_PAD_EIM_D29__UART2_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D29__UART2_RTS (_MX53_PAD_EIM_D29__UART2_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS (_MX53_PAD_EIM_D29__IPU_DISPB0_SER_RS | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D29__CSPI_SS0 (_MX53_PAD_EIM_D29__CSPI_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D29__IPU_DI1_PIN15 (_MX53_PAD_EIM_D29__IPU_DI1_PIN15 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1745,7 +1745,7 @@
#define MX53_PAD_EIM_D29__IPU_DI0_PIN14 (_MX53_PAD_EIM_D29__IPU_DI0_PIN14 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D30__EMI_WEIM_D_30 (_MX53_PAD_EIM_D30__EMI_WEIM_D_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D30__GPIO3_30 (_MX53_PAD_EIM_D30__GPIO3_30 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D30__UART3_CTS (_MX53_PAD_EIM_D30__UART3_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D30__UART3_CTS (_MX53_PAD_EIM_D30__UART3_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D30__IPU_CSI0_D_3 (_MX53_PAD_EIM_D30__IPU_CSI0_D_3 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D30__IPU_DI0_PIN11 (_MX53_PAD_EIM_D30__IPU_DI0_PIN11 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D30__IPU_DISP1_DAT_21 (_MX53_PAD_EIM_D30__IPU_DISP1_DAT_21 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -1753,7 +1753,7 @@
#define MX53_PAD_EIM_D30__USBOH3_USBH2_OC (_MX53_PAD_EIM_D30__USBOH3_USBH2_OC | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D31__EMI_WEIM_D_31 (_MX53_PAD_EIM_D31__EMI_WEIM_D_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D31__GPIO3_31 (_MX53_PAD_EIM_D31__GPIO3_31 | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_EIM_D31__UART3_RTS (_MX53_PAD_EIM_D31__UART3_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_EIM_D31__UART3_RTS (_MX53_PAD_EIM_D31__UART3_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_EIM_D31__IPU_CSI0_D_2 (_MX53_PAD_EIM_D31__IPU_CSI0_D_2 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D31__IPU_DI0_PIN12 (_MX53_PAD_EIM_D31__IPU_DI0_PIN12 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_EIM_D31__IPU_DISP1_DAT_20 (_MX53_PAD_EIM_D31__IPU_DISP1_DAT_20 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -2061,13 +2061,13 @@
#define MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B (_MX53_PAD_PATA_RESET_B__PATA_PATA_RESET_B | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_PATA_RESET_B__GPIO7_4 (_MX53_PAD_PATA_RESET_B__GPIO7_4 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_PATA_RESET_B__ESDHC3_CMD (_MX53_PAD_PATA_RESET_B__ESDHC3_CMD | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
-#define MX53_PAD_PATA_RESET_B__UART1_CTS (_MX53_PAD_PATA_RESET_B__UART1_CTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_RESET_B__UART1_CTS (_MX53_PAD_PATA_RESET_B__UART1_CTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_PATA_RESET_B__CAN2_TXCAN (_MX53_PAD_PATA_RESET_B__CAN2_TXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0 (_MX53_PAD_PATA_RESET_B__USBPHY1_DATAOUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_PATA_IORDY__PATA_IORDY (_MX53_PAD_PATA_IORDY__PATA_IORDY | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_PATA_IORDY__GPIO7_5 (_MX53_PAD_PATA_IORDY__GPIO7_5 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_PATA_IORDY__ESDHC3_CLK (_MX53_PAD_PATA_IORDY__ESDHC3_CLK | MUX_PAD_CTRL(MX53_SDHC_PAD_CTRL))
-#define MX53_PAD_PATA_IORDY__UART1_RTS (_MX53_PAD_PATA_IORDY__UART1_RTS | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_PATA_IORDY__UART1_RTS (_MX53_PAD_PATA_IORDY__UART1_RTS | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_PATA_IORDY__CAN2_RXCAN (_MX53_PAD_PATA_IORDY__CAN2_RXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_PATA_IORDY__USBPHY1_DATAOUT_1 (_MX53_PAD_PATA_IORDY__USBPHY1_DATAOUT_1 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_PATA_DA_0__PATA_DA_0 (_MX53_PAD_PATA_DA_0__PATA_DA_0 | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -2339,7 +2339,7 @@
#define MX53_PAD_GPIO_7__GPIO1_7 (_MX53_PAD_GPIO_7__GPIO1_7 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_7__EPIT1_EPITO (_MX53_PAD_GPIO_7__EPIT1_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_7__CAN1_TXCAN (_MX53_PAD_GPIO_7__CAN1_TXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_GPIO_7__UART2_TXD_MUX (_MX53_PAD_GPIO_7__UART2_TXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_7__UART2_TXD_MUX (_MX53_PAD_GPIO_7__UART2_TXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_GPIO_7__FIRI_RXD (_MX53_PAD_GPIO_7__FIRI_RXD | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_7__SPDIF_PLOCK (_MX53_PAD_GPIO_7__SPDIF_PLOCK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_7__CCM_PLL2_BYP (_MX53_PAD_GPIO_7__CCM_PLL2_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
@@ -2347,7 +2347,7 @@
#define MX53_PAD_GPIO_8__GPIO1_8 (_MX53_PAD_GPIO_8__GPIO1_8 | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_8__EPIT2_EPITO (_MX53_PAD_GPIO_8__EPIT2_EPITO | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_8__CAN1_RXCAN (_MX53_PAD_GPIO_8__CAN1_RXCAN | MUX_PAD_CTRL(NO_PAD_CTRL))
-#define MX53_PAD_GPIO_8__UART2_RXD_MUX (_MX53_PAD_GPIO_8__UART2_RXD_MUX | MUX_PAD_CTRL(NO_PAD_CTRL))
+#define MX53_PAD_GPIO_8__UART2_RXD_MUX (_MX53_PAD_GPIO_8__UART2_RXD_MUX | MUX_PAD_CTRL(MX53_UART_PAD_CTRL))
#define MX53_PAD_GPIO_8__FIRI_TXD (_MX53_PAD_GPIO_8__FIRI_TXD | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_8__SPDIF_SRCLK (_MX53_PAD_GPIO_8__SPDIF_SRCLK | MUX_PAD_CTRL(NO_PAD_CTRL))
#define MX53_PAD_GPIO_8__CCM_PLL3_BYP (_MX53_PAD_GPIO_8__CCM_PLL3_BYP | MUX_PAD_CTRL(NO_PAD_CTRL))
diff --git a/arch/arm/plat-mxc/include/mach/iomux-v1.h b/arch/arm/plat-mxc/include/mach/iomux-v1.h
index c07d30210c57..6fa8a707b9a0 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-v1.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-v1.h
@@ -85,9 +85,6 @@
#define GPIO_BOUT_0 (2 << GPIO_BOUT_SHIFT)
#define GPIO_BOUT_1 (3 << GPIO_BOUT_SHIFT)
-/* decode irq number to use with IMR(x), ISR(x) and friends */
-#define IRQ_TO_REG(irq) ((irq - MXC_INTERNAL_IRQS) >> 5)
-
#define IRQ_GPIOA(x) (MXC_GPIO_IRQ_START + x)
#define IRQ_GPIOB(x) (IRQ_GPIOA(32) + x)
#define IRQ_GPIOC(x) (IRQ_GPIOB(32) + x)
@@ -98,7 +95,6 @@
extern int mxc_gpio_mode(int gpio_mode);
extern int mxc_gpio_setup_multiple_pins(const int *pin_list, unsigned count,
const char *label);
-extern void mxc_gpio_release_multiple_pins(const int *pin_list, int count);
extern int __init imx_iomuxv1_init(void __iomem *base, int numports);
diff --git a/arch/arm/plat-mxc/include/mach/iomux-v3.h b/arch/arm/plat-mxc/include/mach/iomux-v3.h
index 82620af1922f..ebbce33097a7 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-v3.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-v3.h
@@ -66,7 +66,6 @@ typedef u64 iomux_v3_cfg_t;
#define MUX_MODE_MASK ((iomux_v3_cfg_t)0x1f << MUX_MODE_SHIFT)
#define MUX_PAD_CTRL_SHIFT 41
#define MUX_PAD_CTRL_MASK ((iomux_v3_cfg_t)0x1ffff << MUX_PAD_CTRL_SHIFT)
-#define NO_PAD_CTRL ((iomux_v3_cfg_t)1 << (MUX_PAD_CTRL_SHIFT + 16))
#define MUX_SEL_INPUT_SHIFT 58
#define MUX_SEL_INPUT_MASK ((iomux_v3_cfg_t)0xf << MUX_SEL_INPUT_SHIFT)
@@ -85,6 +84,7 @@ typedef u64 iomux_v3_cfg_t;
* Use to set PAD control
*/
+#define NO_PAD_CTRL (1 << 16)
#define PAD_CTL_DVS (1 << 13)
#define PAD_CTL_HYS (1 << 8)
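
A short aside on the NO_PAD_CTRL move in the iomux-v3.h hunks above: the old definition sat pre-shifted at bit 57 of the 64-bit iomux_v3_cfg_t ((iomux_v3_cfg_t)1 << (MUX_PAD_CTRL_SHIFT + 16)); the new one is the top bit of the raw 17-bit pad-control value (1 << 16), so it now travels through MUX_PAD_CTRL() like any other pad setting, which is what allows the iomux-mx53.h hunks earlier in this patch to swap NO_PAD_CTRL for MX53_UART_PAD_CTRL on the UART pads. A minimal standalone sketch of that packing, assuming MUX_PAD_CTRL() simply shifts and masks the value into the pad-control field (the macro itself is not part of this hunk):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t iomux_v3_cfg_t;

#define MUX_PAD_CTRL_SHIFT	41
#define MUX_PAD_CTRL_MASK	((iomux_v3_cfg_t)0x1ffff << MUX_PAD_CTRL_SHIFT)
#define NO_PAD_CTRL		(1 << 16)	/* top bit of the 17-bit pad-control field */

/* hypothetical helper mirroring what MUX_PAD_CTRL() is assumed to do */
static inline iomux_v3_cfg_t mux_pad_ctrl(uint32_t pad_ctrl)
{
	return ((iomux_v3_cfg_t)pad_ctrl << MUX_PAD_CTRL_SHIFT) & MUX_PAD_CTRL_MASK;
}

int main(void)
{
	/* the relocated flag still lands on bit 57, where the old pre-shifted
	   definition ((iomux_v3_cfg_t)1 << (MUX_PAD_CTRL_SHIFT + 16)) sat */
	printf("%d\n", mux_pad_ctrl(NO_PAD_CTRL) == (iomux_v3_cfg_t)1 << 57);
	return 0;
}
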
diff --git a/arch/arm/plat-mxc/include/mach/iomux.h b/arch/arm/plat-mxc/include/mach/iomux.h
deleted file mode 100644
index 3d226d7e7be2..000000000000
--- a/arch/arm/plat-mxc/include/mach/iomux.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2010 Uwe Kleine-Koenig, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-#ifndef __MACH_IOMUX_H__
-#define __MACH_IOMUX_H__
-
-/* This file will go away, please include mach/iomux-mx... directly */
-
-#ifdef CONFIG_ARCH_MX1
-#include <mach/iomux-mx1.h>
-#endif
-#ifdef CONFIG_ARCH_MX2
-#include <mach/iomux-mx2x.h>
-#ifdef CONFIG_MACH_MX21
-#include <mach/iomux-mx21.h>
-#endif
-#ifdef CONFIG_MACH_MX27
-#include <mach/iomux-mx27.h>
-#endif
-#endif
-
-#endif /* __MACH_IOMUX_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/mx53.h b/arch/arm/plat-mxc/include/mach/mx53.h
index 9d2a1ef84de2..74cd093203e0 100644
--- a/arch/arm/plat-mxc/include/mach/mx53.h
+++ b/arch/arm/plat-mxc/include/mach/mx53.h
@@ -145,14 +145,14 @@
/*
* Memory regions and CS
*/
-#define MX53_CSD0_BASE_ADDR 0x90000000
-#define MX53_CSD1_BASE_ADDR 0xA0000000
-#define MX53_CS0_BASE_ADDR 0xB0000000
-#define MX53_CS1_BASE_ADDR 0xB8000000
-#define MX53_CS2_BASE_ADDR 0xC0000000
-#define MX53_CS3_BASE_ADDR 0xC8000000
-#define MX53_CS4_BASE_ADDR 0xCC000000
-#define MX53_CS5_BASE_ADDR 0xCE000000
+#define MX53_CSD0_BASE_ADDR 0x70000000
+#define MX53_CSD1_BASE_ADDR 0xB0000000
+#define MX53_CS0_BASE_ADDR 0xF0000000
+#define MX53_CS1_32MB_BASE_ADDR 0xF2000000
+#define MX53_CS1_64MB_BASE_ADDR 0xF4000000
+#define MX53_CS2_64MB_BASE_ADDR 0xF4000000
+#define MX53_CS2_96MB_BASE_ADDR 0xF6000000
+#define MX53_CS3_BASE_ADDR 0xF6000000
#define MX53_IO_P2V(x) IMX_IO_P2V(x)
#define MX53_IO_ADDRESS(x) IOMEM(MX53_IO_P2V(x))
@@ -233,7 +233,7 @@
#define MX53_INT_ESDHC2 2
#define MX53_INT_ESDHC3 3
#define MX53_INT_ESDHC4 4
-#define MX53_INT_RESV5 5
+#define MX53_INT_DAP 5
#define MX53_INT_SDMA 6
#define MX53_INT_IOMUX 7
#define MX53_INT_NFC 8
@@ -262,8 +262,8 @@
#define MX53_INT_UART1 31
#define MX53_INT_UART2 32
#define MX53_INT_UART3 33
-#define MX53_INT_RESV34 34
-#define MX53_INT_RESV35 35
+#define MX53_INT_RTC 34
+#define MX53_INT_PTP 35
#define MX53_INT_ECSPI1 36
#define MX53_INT_ECSPI2 37
#define MX53_INT_CSPI 38
@@ -293,8 +293,8 @@
#define MX53_INT_I2C1 62
#define MX53_INT_I2C2 63
#define MX53_INT_I2C3 64
-#define MX53_INT_RESV65 65
-#define MX53_INT_RESV66 66
+#define MX53_INT_MLB 65
+#define MX53_INT_ASRC 66
#define MX53_INT_SPDIF 67
#define MX53_INT_SIM_DAT 68
#define MX53_INT_IIM 69
diff --git a/arch/arm/plat-mxc/include/mach/mxc.h b/arch/arm/plat-mxc/include/mach/mxc.h
index 4ac53ce97c24..09879235a9f5 100644
--- a/arch/arm/plat-mxc/include/mach/mxc.h
+++ b/arch/arm/plat-mxc/include/mach/mxc.h
@@ -68,7 +68,7 @@
extern unsigned int __mxc_cpu_type;
#endif
-#ifdef CONFIG_ARCH_MX1
+#ifdef CONFIG_SOC_IMX1
# ifdef mxc_cpu_type
# undef mxc_cpu_type
# define mxc_cpu_type __mxc_cpu_type
@@ -80,7 +80,7 @@ extern unsigned int __mxc_cpu_type;
# define cpu_is_mx1() (0)
#endif
-#ifdef CONFIG_MACH_MX21
+#ifdef CONFIG_SOC_IMX21
# ifdef mxc_cpu_type
# undef mxc_cpu_type
# define mxc_cpu_type __mxc_cpu_type
@@ -92,7 +92,7 @@ extern unsigned int __mxc_cpu_type;
# define cpu_is_mx21() (0)
#endif
-#ifdef CONFIG_ARCH_MX25
+#ifdef CONFIG_SOC_IMX25
# ifdef mxc_cpu_type
# undef mxc_cpu_type
# define mxc_cpu_type __mxc_cpu_type
@@ -104,7 +104,7 @@ extern unsigned int __mxc_cpu_type;
# define cpu_is_mx25() (0)
#endif
-#ifdef CONFIG_MACH_MX27
+#ifdef CONFIG_SOC_IMX27
# ifdef mxc_cpu_type
# undef mxc_cpu_type
# define mxc_cpu_type __mxc_cpu_type
diff --git a/arch/arm/plat-mxc/include/mach/timex.h b/arch/arm/plat-mxc/include/mach/timex.h
index d61d5c74817c..10343d1f87e1 100644
--- a/arch/arm/plat-mxc/include/mach/timex.h
+++ b/arch/arm/plat-mxc/include/mach/timex.h
@@ -16,16 +16,7 @@
#ifndef __ASM_ARCH_MXC_TIMEX_H__
#define __ASM_ARCH_MXC_TIMEX_H__
-#if defined CONFIG_ARCH_MX1
-#define CLOCK_TICK_RATE 16000000
-#elif defined CONFIG_ARCH_MX2
-#define CLOCK_TICK_RATE 13300000
-#elif defined CONFIG_ARCH_MX3
-#define CLOCK_TICK_RATE 16625000
-#elif defined CONFIG_ARCH_MX25
-#define CLOCK_TICK_RATE 16000000
-#elif defined CONFIG_ARCH_MX5
-#define CLOCK_TICK_RATE 8000000
-#endif
+/* Bogus value */
+#define CLOCK_TICK_RATE 12345678
#endif /* __ASM_ARCH_MXC_TIMEX_H__ */
diff --git a/arch/arm/plat-mxc/iomux-v1.c b/arch/arm/plat-mxc/iomux-v1.c
index 3238c10d4e02..1f73963bc13e 100644
--- a/arch/arm/plat-mxc/iomux-v1.c
+++ b/arch/arm/plat-mxc/iomux-v1.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(mxc_gpio_mode);
static int imx_iomuxv1_setup_multiple(const int *list, unsigned count)
{
size_t i;
- int ret;
+ int ret = 0;
for (i = 0; i < count; ++i) {
ret = mxc_gpio_mode(list[i]);
@@ -172,45 +172,13 @@ static int imx_iomuxv1_setup_multiple(const int *list, unsigned count)
int mxc_gpio_setup_multiple_pins(const int *pin_list, unsigned count,
const char *label)
{
- size_t i;
int ret;
- for (i = 0; i < count; ++i) {
- unsigned gpio = pin_list[i] & (GPIO_PIN_MASK | GPIO_PORT_MASK);
-
- ret = gpio_request(gpio, label);
- if (ret)
- goto err_gpio_request;
- }
-
ret = imx_iomuxv1_setup_multiple(pin_list, count);
- if (ret)
- goto err_setup;
-
- return 0;
-
-err_setup:
- BUG_ON(i != count);
-
-err_gpio_request:
- mxc_gpio_release_multiple_pins(pin_list, i);
-
return ret;
}
EXPORT_SYMBOL(mxc_gpio_setup_multiple_pins);
-void mxc_gpio_release_multiple_pins(const int *pin_list, int count)
-{
- size_t i;
-
- for (i = 0; i < count; ++i) {
- unsigned gpio = pin_list[i] & (GPIO_PIN_MASK | GPIO_PORT_MASK);
-
- gpio_free(gpio);
- }
-}
-EXPORT_SYMBOL(mxc_gpio_release_multiple_pins);
-
int __init imx_iomuxv1_init(void __iomem *base, int numports)
{
imx_iomuxv1_baseaddr = base;
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index 7a61ef8f471a..761c3c940a68 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -214,14 +214,14 @@ static int __devinit mxc_pwm_probe(struct platform_device *pdev)
goto err_free_clk;
}
- r = request_mem_region(r->start, r->end - r->start + 1, pdev->name);
+ r = request_mem_region(r->start, resource_size(r), pdev->name);
if (r == NULL) {
dev_err(&pdev->dev, "failed to request memory resource\n");
ret = -EBUSY;
goto err_free_clk;
}
- pwm->mmio_base = ioremap(r->start, r->end - r->start + 1);
+ pwm->mmio_base = ioremap(r->start, resource_size(r));
if (pwm->mmio_base == NULL) {
dev_err(&pdev->dev, "failed to ioremap() registers\n");
ret = -ENODEV;
@@ -236,7 +236,7 @@ static int __devinit mxc_pwm_probe(struct platform_device *pdev)
return 0;
err_free_mem:
- release_mem_region(r->start, r->end - r->start + 1);
+ release_mem_region(r->start, resource_size(r));
err_free_clk:
clk_put(pwm->clk);
err_free:
@@ -260,7 +260,7 @@ static int __devexit mxc_pwm_remove(struct platform_device *pdev)
iounmap(pwm->mmio_base);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, r->end - r->start + 1);
+ release_mem_region(r->start, resource_size(r));
clk_put(pwm->clk);
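
The three pwm.c hunks above are mechanical: struct resource ranges are inclusive, so the open-coded r->end - r->start + 1 is exactly the value the kernel's resource_size() helper returns. A tiny standalone sketch of that arithmetic (the struct and values here are stand-ins, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

struct resource { uint64_t start, end; };

/* same inclusive-range arithmetic the hunks replace with resource_size() */
static uint64_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource r = { .start = 0x1000, .end = 0x1fff };

	printf("%llu\n", (unsigned long long)resource_size(&r));	/* prints 4096 */
	return 0;
}
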
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index 57f9395f87ce..710f2e7da4ce 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -49,6 +49,8 @@
void __iomem *tzic_base; /* Used as irq controller base in entry-macro.S */
+#define TZIC_NUM_IRQS 128
+
#ifdef CONFIG_FIQ
static int tzic_set_irq_fiq(unsigned int irq, unsigned int type)
{
@@ -166,7 +168,7 @@ void __init tzic_init_irq(void __iomem *irqbase)
/* all IRQ no FIQ Warning :: No selection */
- for (i = 0; i < MXC_INTERNAL_IRQS; i++) {
+ for (i = 0; i < TZIC_NUM_IRQS; i++) {
irq_set_chip_and_handler(i, &mxc_tzic_chip.base,
handle_level_irq);
set_irq_flags(i, IRQF_VALID);
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 49a4c75243fc..6e6735f04ee3 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -211,9 +211,6 @@ choice
depends on ARCH_OMAP
default OMAP_PM_NOOP
-config OMAP_PM_NONE
- bool "No PM layer"
-
config OMAP_PM_NOOP
bool "No-op/debug PM layer"
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index f7fed6080190..a6cbb712da51 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/clocksource.h>
#include <asm/sched_clock.h>
@@ -26,87 +27,16 @@
#include <plat/clock.h>
-
/*
* 32KHz clocksource ... always available, on pretty most chips except
* OMAP 730 and 1510. Other timers could be used as clocksources, with
* higher resolution in free-running counter modes (e.g. 12 MHz xtal),
* but systems won't necessarily want to spend resources that way.
*/
+static void __iomem *timer_32k_base;
#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
-#include <linux/clocksource.h>
-
-/*
- * offset_32k holds the init time counter value. It is then subtracted
- * from every counter read to achieve a counter that counts time from the
- * kernel boot (needed for sched_clock()).
- */
-static u32 offset_32k __read_mostly;
-
-#ifdef CONFIG_ARCH_OMAP16XX
-static cycle_t notrace omap16xx_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k;
-}
-#else
-#define omap16xx_32k_read NULL
-#endif
-
-#ifdef CONFIG_SOC_OMAP2420
-static cycle_t notrace omap2420_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap2420_32k_read NULL
-#endif
-
-#ifdef CONFIG_SOC_OMAP2430
-static cycle_t notrace omap2430_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap2430_32k_read NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-static cycle_t notrace omap34xx_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap34xx_32k_read NULL
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-static cycle_t notrace omap44xx_32k_read(struct clocksource *cs)
-{
- return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k;
-}
-#else
-#define omap44xx_32k_read NULL
-#endif
-
-/*
- * Kernel assumes that sched_clock can be called early but may not have
- * things ready yet.
- */
-static cycle_t notrace omap_32k_read_dummy(struct clocksource *cs)
-{
- return 0;
-}
-
-static struct clocksource clocksource_32k = {
- .name = "32k_counter",
- .rating = 250,
- .read = omap_32k_read_dummy,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
/*
* Returns current time from boot in nsecs. It's OK for this to wrap
* around for now, as it's just a relative time stamp.
@@ -122,11 +52,11 @@ static DEFINE_CLOCK_DATA(cd);
static inline unsigned long long notrace _omap_32k_sched_clock(void)
{
- u32 cyc = clocksource_32k.read(&clocksource_32k);
+ u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
}
-#ifndef CONFIG_OMAP_MPU_TIMER
+#if defined(CONFIG_OMAP_32K_TIMER) && !defined(CONFIG_OMAP_MPU_TIMER)
unsigned long long notrace sched_clock(void)
{
return _omap_32k_sched_clock();
@@ -140,7 +70,7 @@ unsigned long long notrace omap_32k_sched_clock(void)
static void notrace omap_update_sched_clock(void)
{
- u32 cyc = clocksource_32k.read(&clocksource_32k);
+ u32 cyc = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
update_sched_clock(&cd, cyc, (u32)~0);
}
@@ -153,6 +83,7 @@ static void notrace omap_update_sched_clock(void)
*/
static struct timespec persistent_ts;
static cycles_t cycles, last_cycles;
+static unsigned int persistent_mult, persistent_shift;
void read_persistent_clock(struct timespec *ts)
{
unsigned long long nsecs;
@@ -160,11 +91,10 @@ void read_persistent_clock(struct timespec *ts)
struct timespec *tsp = &persistent_ts;
last_cycles = cycles;
- cycles = clocksource_32k.read(&clocksource_32k);
+ cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
delta = cycles - last_cycles;
- nsecs = clocksource_cyc2ns(delta,
- clocksource_32k.mult, clocksource_32k.shift);
+ nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
timespec_add_ns(tsp, nsecs);
*ts = *tsp;
@@ -176,29 +106,46 @@ int __init omap_init_clocksource_32k(void)
"%s: can't register clocksource!\n";
if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
+ u32 pbase;
+ unsigned long size = SZ_4K;
+ void __iomem *base;
struct clk *sync_32k_ick;
- if (cpu_is_omap16xx())
- clocksource_32k.read = omap16xx_32k_read;
- else if (cpu_is_omap2420())
- clocksource_32k.read = omap2420_32k_read;
+ if (cpu_is_omap16xx()) {
+ pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
+ size = SZ_1K;
+ } else if (cpu_is_omap2420())
+ pbase = OMAP2420_32KSYNCT_BASE + 0x10;
else if (cpu_is_omap2430())
- clocksource_32k.read = omap2430_32k_read;
+ pbase = OMAP2430_32KSYNCT_BASE + 0x10;
else if (cpu_is_omap34xx())
- clocksource_32k.read = omap34xx_32k_read;
+ pbase = OMAP3430_32KSYNCT_BASE + 0x10;
else if (cpu_is_omap44xx())
- clocksource_32k.read = omap44xx_32k_read;
+ pbase = OMAP4430_32KSYNCT_BASE + 0x10;
else
return -ENODEV;
+ /* For this to work we must have a static mapping in io.c for this area */
+ base = ioremap(pbase, size);
+ if (!base)
+ return -ENODEV;
+
sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
if (!IS_ERR(sync_32k_ick))
clk_enable(sync_32k_ick);
- offset_32k = clocksource_32k.read(&clocksource_32k);
+ timer_32k_base = base;
+
+ /*
+ * 120000 rough estimate from the calculations in
+ * __clocksource_updatefreq_scale.
+ */
+ clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
+ 32768, NSEC_PER_SEC, 120000);
- if (clocksource_register_hz(&clocksource_32k, 32768))
- printk(err, clocksource_32k.name);
+ if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
+ clocksource_mmio_readl_up))
+ printk(err, "32k_counter");
init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
32768, SC_MULT, SC_SHIFT);
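
One step in the counter_32k.c rework worth spelling out: read_persistent_clock() used to borrow clocksource_32k's mult/shift pair; with the clocksource now registered via clocksource_mmio_init(), the patch precomputes its own persistent_mult/persistent_shift with clocks_calc_mult_shift() and converts cycle deltas with clocksource_cyc2ns(), which boils down to (delta * mult) >> shift. A standalone sketch of that conversion for a 32768 Hz counter (the mult/shift values below are illustrative, not the ones clocks_calc_mult_shift() would actually pick):

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as clocksource_cyc2ns(delta, mult, shift) */
static uint64_t cyc2ns(uint32_t delta, uint32_t mult, uint32_t shift)
{
	return ((uint64_t)delta * mult) >> shift;
}

int main(void)
{
	/* one 32 kHz tick is ~1e9/32768 ns; 4000000000 >> 17 is roughly 30517 */
	printf("%llu\n", (unsigned long long)cyc2ns(1, 4000000000u, 17));
	return 0;
}
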
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index ee9f6ebba29b..8dfb8186b2c2 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -41,127 +41,6 @@
#include <plat/dmtimer.h>
#include <mach/irqs.h>
-/* register offsets */
-#define _OMAP_TIMER_ID_OFFSET 0x00
-#define _OMAP_TIMER_OCP_CFG_OFFSET 0x10
-#define _OMAP_TIMER_SYS_STAT_OFFSET 0x14
-#define _OMAP_TIMER_STAT_OFFSET 0x18
-#define _OMAP_TIMER_INT_EN_OFFSET 0x1c
-#define _OMAP_TIMER_WAKEUP_EN_OFFSET 0x20
-#define _OMAP_TIMER_CTRL_OFFSET 0x24
-#define OMAP_TIMER_CTRL_GPOCFG (1 << 14)
-#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13)
-#define OMAP_TIMER_CTRL_PT (1 << 12)
-#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8)
-#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8)
-#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8)
-#define OMAP_TIMER_CTRL_SCPWM (1 << 7)
-#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */
-#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */
-#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* prescaler value shift */
-#define OMAP_TIMER_CTRL_POSTED (1 << 2)
-#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */
-#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */
-#define _OMAP_TIMER_COUNTER_OFFSET 0x28
-#define _OMAP_TIMER_LOAD_OFFSET 0x2c
-#define _OMAP_TIMER_TRIGGER_OFFSET 0x30
-#define _OMAP_TIMER_WRITE_PEND_OFFSET 0x34
-#define WP_NONE 0 /* no write pending bit */
-#define WP_TCLR (1 << 0)
-#define WP_TCRR (1 << 1)
-#define WP_TLDR (1 << 2)
-#define WP_TTGR (1 << 3)
-#define WP_TMAR (1 << 4)
-#define WP_TPIR (1 << 5)
-#define WP_TNIR (1 << 6)
-#define WP_TCVR (1 << 7)
-#define WP_TOCR (1 << 8)
-#define WP_TOWR (1 << 9)
-#define _OMAP_TIMER_MATCH_OFFSET 0x38
-#define _OMAP_TIMER_CAPTURE_OFFSET 0x3c
-#define _OMAP_TIMER_IF_CTRL_OFFSET 0x40
-#define _OMAP_TIMER_CAPTURE2_OFFSET 0x44 /* TCAR2, 34xx only */
-#define _OMAP_TIMER_TICK_POS_OFFSET 0x48 /* TPIR, 34xx only */
-#define _OMAP_TIMER_TICK_NEG_OFFSET 0x4c /* TNIR, 34xx only */
-#define _OMAP_TIMER_TICK_COUNT_OFFSET 0x50 /* TCVR, 34xx only */
-#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */
-#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */
-
-/* register offsets with the write pending bit encoded */
-#define WPSHIFT 16
-
-#define OMAP_TIMER_ID_REG (_OMAP_TIMER_ID_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_OCP_CFG_REG (_OMAP_TIMER_OCP_CFG_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_SYS_STAT_REG (_OMAP_TIMER_SYS_STAT_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_STAT_REG (_OMAP_TIMER_STAT_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_INT_EN_REG (_OMAP_TIMER_INT_EN_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
- | (WP_TCLR << WPSHIFT))
-
-#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
- | (WP_TCRR << WPSHIFT))
-
-#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
- | (WP_TLDR << WPSHIFT))
-
-#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
- | (WP_TTGR << WPSHIFT))
-
-#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
- | (WP_TMAR << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
- | (WP_TPIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
- | (WP_TNIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
- | (WP_TCVR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
- (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
- (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-
-struct omap_dm_timer {
- unsigned long phys_base;
- int irq;
-#ifdef CONFIG_ARCH_OMAP2PLUS
- struct clk *iclk, *fclk;
-#endif
- void __iomem *io_base;
- unsigned reserved:1;
- unsigned enabled:1;
- unsigned posted:1;
-};
-
static int dm_timer_count;
#ifdef CONFIG_ARCH_OMAP1
@@ -291,11 +170,7 @@ static spinlock_t dm_timer_lock;
*/
static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
{
- if (timer->posted)
- while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
- & (reg >> WPSHIFT))
- cpu_relax();
- return readl(timer->io_base + (reg & 0xff));
+ return __omap_dm_timer_read(timer->io_base, reg, timer->posted);
}
/*
@@ -307,11 +182,7 @@ static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
u32 value)
{
- if (timer->posted)
- while (readl(timer->io_base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
- & (reg >> WPSHIFT))
- cpu_relax();
- writel(value, timer->io_base + (reg & 0xff));
+ __omap_dm_timer_write(timer->io_base, reg, value, timer->posted);
}
static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
@@ -330,7 +201,7 @@ static void omap_dm_timer_wait_for_reset(struct omap_dm_timer *timer)
static void omap_dm_timer_reset(struct omap_dm_timer *timer)
{
- u32 l;
+ int autoidle = 0, wakeup = 0;
if (!cpu_class_is_omap2() || timer != &dm_timers[0]) {
omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
@@ -338,28 +209,21 @@ static void omap_dm_timer_reset(struct omap_dm_timer *timer)
}
omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_OCP_CFG_REG);
- l |= 0x02 << 3; /* Set to smart-idle mode */
- l |= 0x2 << 8; /* Set clock activity to perserve f-clock on idle */
-
/* Enable autoidle on OMAP2 / OMAP3 */
if (cpu_is_omap24xx() || cpu_is_omap34xx())
- l |= 0x1 << 0;
+ autoidle = 1;
/*
* Enable wake-up on OMAP2 CPUs.
*/
if (cpu_class_is_omap2())
- l |= 1 << 2;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, l);
+ wakeup = 1;
- /* Match hardware reset default of posted mode */
- omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
- OMAP_TIMER_CTRL_POSTED);
+ __omap_dm_timer_reset(timer->io_base, autoidle, wakeup);
timer->posted = 1;
}
-static void omap_dm_timer_prepare(struct omap_dm_timer *timer)
+void omap_dm_timer_prepare(struct omap_dm_timer *timer)
{
omap_dm_timer_enable(timer);
omap_dm_timer_reset(timer);
@@ -531,25 +395,13 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_start);
void omap_dm_timer_stop(struct omap_dm_timer *timer)
{
- u32 l;
+ unsigned long rate = 0;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- if (l & OMAP_TIMER_CTRL_ST) {
- l &= ~0x1;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
#ifdef CONFIG_ARCH_OMAP2PLUS
- /* Readback to make sure write has completed */
- omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- /*
- * Wait for functional clock period x 3.5 to make sure that
- * timer is stopped
- */
- udelay(3500000 / clk_get_rate(timer->fclk) + 1);
+ rate = clk_get_rate(timer->fclk);
#endif
- }
- /* Ack possibly pending interrupt */
- omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
- OMAP_TIMER_INT_OVERFLOW);
+
+ __omap_dm_timer_stop(timer->io_base, timer->posted, rate);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
@@ -572,22 +424,11 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
- int ret = -EINVAL;
-
if (source < 0 || source >= 3)
return -EINVAL;
- clk_disable(timer->fclk);
- ret = clk_set_parent(timer->fclk, dm_source_clocks[source]);
- clk_enable(timer->fclk);
-
- /*
- * When the functional clock disappears, too quick writes seem
- * to cause an abort. XXX Is this still necessary?
- */
- __delay(300000);
-
- return ret;
+ return __omap_dm_timer_set_source(timer->fclk,
+ dm_source_clocks[source]);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_source);
@@ -625,8 +466,7 @@ void omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
}
l |= OMAP_TIMER_CTRL_ST;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, load);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ __omap_dm_timer_load_start(timer->io_base, l, load, timer->posted);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start);
@@ -679,8 +519,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler);
void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
unsigned int value)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
+ __omap_dm_timer_int_enable(timer->io_base, value);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable);
@@ -696,17 +535,13 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_read_status);
void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
{
- omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
+ __omap_dm_timer_write_status(timer->io_base, value);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_write_status);
unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
{
- unsigned int l;
-
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
-
- return l;
+ return __omap_dm_timer_read_counter(timer->io_base, timer->posted);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter);
@@ -737,7 +572,7 @@ int omap_dm_timers_active(void)
}
EXPORT_SYMBOL_GPL(omap_dm_timers_active);
-int __init omap_dm_timer_init(void)
+static int __init omap_dm_timer_init(void)
{
struct omap_dm_timer *timer;
int i, map_size = SZ_8K; /* Module 4KB + L4 4KB except on omap1 */
@@ -790,8 +625,16 @@ int __init omap_dm_timer_init(void)
sprintf(clk_name, "gpt%d_fck", i + 1);
timer->fclk = clk_get(NULL, clk_name);
}
+
+ /* One or two timers may be set up early for sys_timer */
+ if (sys_timer_reserved & (1 << i)) {
+ timer->reserved = 1;
+ timer->posted = 1;
+ }
#endif
}
return 0;
}
+
+arch_initcall(omap_dm_timer_init);
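
For reference, the posted-mode access pattern that the removed omap_dm_timer_read_reg()/omap_dm_timer_write_reg() implemented, and that the new __omap_dm_timer_read()/__omap_dm_timer_write() inlines are presumably expected to preserve: spin on the write-pending bit encoded in the upper half of the register token, then access the low byte offset. A standalone sketch using the WPSHIFT encoding from the dmtimer.h hunks further down (the readl() stub and the fake register block exist only so the sketch compiles on its own):

#include <stdint.h>
#include <stdio.h>

#define WPSHIFT				16
#define WP_TCRR				(1 << 1)
#define OMAP_TIMER_WRITE_PEND_REG	0x34
#define OMAP_TIMER_COUNTER_REG		(0x28 | (WP_TCRR << WPSHIFT))

/* stand-in for the kernel's readl(), only to keep the sketch self-contained */
static uint32_t readl(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

/* mirrors the removed omap_dm_timer_read_reg(): in posted mode, wait for the
 * matching write-pending bit to clear before reading the register itself */
static uint32_t timer_read(void *io_base, uint32_t reg, int posted)
{
	if (posted)
		while (readl((char *)io_base + OMAP_TIMER_WRITE_PEND_REG) &
		       (reg >> WPSHIFT))
			;	/* spin until the earlier posted write retires */
	return readl((char *)io_base + (reg & 0xff));
}

int main(void)
{
	uint32_t regs[0x40 / 4] = { 0 };	/* fake, idle register block */

	regs[0x28 / 4] = 0x1234;		/* pretend counter (TCRR) value */
	printf("%#x\n", timer_read(regs, OMAP_TIMER_COUNTER_REG, 1));
	return 0;
}
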
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 006e599c6613..f57e0649ab30 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -152,7 +152,7 @@ struct dpll_data {
u16 max_multiplier;
u8 last_rounded_n;
u8 min_divider;
- u8 max_divider;
+ u16 max_divider;
u8 modes;
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
void __iomem *autoidle_reg;
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 5288130be96e..4564cc697d7f 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -34,7 +34,11 @@
struct sys_timer;
extern void omap_map_common_io(void);
-extern struct sys_timer omap_timer;
+extern struct sys_timer omap1_timer;
+extern struct sys_timer omap2_timer;
+extern struct sys_timer omap3_timer;
+extern struct sys_timer omap3_secure_timer;
+extern struct sys_timer omap4_timer;
extern bool omap_32k_timer_init(void);
extern int __init omap_init_clocksource_32k(void);
extern unsigned long long notrace omap_32k_sched_clock(void);
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index d6c70d2f4030..eb5d16c60cd9 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -32,6 +32,10 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
#ifndef __ASM_ARCH_DMTIMER_H
#define __ASM_ARCH_DMTIMER_H
@@ -56,12 +60,8 @@
*/
#define OMAP_TIMER_IP_VERSION_1 0x1
struct omap_dm_timer;
-extern struct omap_dm_timer *gptimer_wakeup;
-extern struct sys_timer omap_timer;
struct clk;
-int omap_dm_timer_init(void);
-
struct omap_dm_timer *omap_dm_timer_request(void);
struct omap_dm_timer *omap_dm_timer_request_specific(int timer_id);
void omap_dm_timer_free(struct omap_dm_timer *timer);
@@ -93,5 +93,248 @@ void omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value
int omap_dm_timers_active(void);
+/*
+ * Do not use the defines below, they are not needed. They should only be
+ * used by dmtimer.c and sys_timer related code.
+ */
+
+/* register offsets */
+#define _OMAP_TIMER_ID_OFFSET 0x00
+#define _OMAP_TIMER_OCP_CFG_OFFSET 0x10
+#define _OMAP_TIMER_SYS_STAT_OFFSET 0x14
+#define _OMAP_TIMER_STAT_OFFSET 0x18
+#define _OMAP_TIMER_INT_EN_OFFSET 0x1c
+#define _OMAP_TIMER_WAKEUP_EN_OFFSET 0x20
+#define _OMAP_TIMER_CTRL_OFFSET 0x24
+#define OMAP_TIMER_CTRL_GPOCFG (1 << 14)
+#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13)
+#define OMAP_TIMER_CTRL_PT (1 << 12)
+#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8)
+#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8)
+#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8)
+#define OMAP_TIMER_CTRL_SCPWM (1 << 7)
+#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */
+#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */
+#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* prescaler value shift */
+#define OMAP_TIMER_CTRL_POSTED (1 << 2)
+#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */
+#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */
+#define _OMAP_TIMER_COUNTER_OFFSET 0x28
+#define _OMAP_TIMER_LOAD_OFFSET 0x2c
+#define _OMAP_TIMER_TRIGGER_OFFSET 0x30
+#define _OMAP_TIMER_WRITE_PEND_OFFSET 0x34
+#define WP_NONE 0 /* no write pending bit */
+#define WP_TCLR (1 << 0)
+#define WP_TCRR (1 << 1)
+#define WP_TLDR (1 << 2)
+#define WP_TTGR (1 << 3)
+#define WP_TMAR (1 << 4)
+#define WP_TPIR (1 << 5)
+#define WP_TNIR (1 << 6)
+#define WP_TCVR (1 << 7)
+#define WP_TOCR (1 << 8)
+#define WP_TOWR (1 << 9)
+#define _OMAP_TIMER_MATCH_OFFSET 0x38
+#define _OMAP_TIMER_CAPTURE_OFFSET 0x3c
+#define _OMAP_TIMER_IF_CTRL_OFFSET 0x40
+#define _OMAP_TIMER_CAPTURE2_OFFSET 0x44 /* TCAR2, 34xx only */
+#define _OMAP_TIMER_TICK_POS_OFFSET 0x48 /* TPIR, 34xx only */
+#define _OMAP_TIMER_TICK_NEG_OFFSET 0x4c /* TNIR, 34xx only */
+#define _OMAP_TIMER_TICK_COUNT_OFFSET 0x50 /* TCVR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */
+#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */
+
+/* register offsets with the write pending bit encoded */
+#define WPSHIFT 16
+
+#define OMAP_TIMER_ID_REG (_OMAP_TIMER_ID_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_OCP_CFG_REG (_OMAP_TIMER_OCP_CFG_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_SYS_STAT_REG (_OMAP_TIMER_SYS_STAT_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_STAT_REG (_OMAP_TIMER_STAT_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_INT_EN_REG (_OMAP_TIMER_INT_EN_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
+ | (WP_TCLR << WPSHIFT))
+
+#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
+ | (WP_TCRR << WPSHIFT))
+
+#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
+ | (WP_TLDR << WPSHIFT))
+
+#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
+ | (WP_TTGR << WPSHIFT))
+
+#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
+ | (WP_TMAR << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
+ | (WP_TPIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
+ | (WP_TNIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
+ | (WP_TCVR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
+
+struct omap_dm_timer {
+ unsigned long phys_base;
+ int irq;
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ struct clk *iclk, *fclk;
+#endif
+ void __iomem *io_base;
+ unsigned long rate;
+ unsigned reserved:1;
+ unsigned enabled:1;
+ unsigned posted:1;
+};
+
+extern u32 sys_timer_reserved;
+void omap_dm_timer_prepare(struct omap_dm_timer *timer);
+
+static inline u32 __omap_dm_timer_read(void __iomem *base, u32 reg,
+ int posted)
+{
+ if (posted)
+ while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
+ & (reg >> WPSHIFT))
+ cpu_relax();
+
+ return __raw_readl(base + (reg & 0xff));
+}
+
+static inline void __omap_dm_timer_write(void __iomem *base, u32 reg, u32 val,
+ int posted)
+{
+ if (posted)
+ while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
+ & (reg >> WPSHIFT))
+ cpu_relax();
+
+ __raw_writel(val, base + (reg & 0xff));
+}
+
+/* Assumes the source clock has been set by caller */
+static inline void __omap_dm_timer_reset(void __iomem *base, int autoidle,
+ int wakeup)
+{
+ u32 l;
+
+ l = __omap_dm_timer_read(base, OMAP_TIMER_OCP_CFG_REG, 0);
+ l |= 0x02 << 3; /* Set to smart-idle mode */
+ l |= 0x2 << 8; /* Set clock activity to preserve f-clock on idle */
+
+ if (autoidle)
+ l |= 0x1 << 0;
+
+ if (wakeup)
+ l |= 1 << 2;
+
+ __omap_dm_timer_write(base, OMAP_TIMER_OCP_CFG_REG, l, 0);
+
+ /* Match hardware reset default of posted mode */
+ __omap_dm_timer_write(base, OMAP_TIMER_IF_CTRL_REG,
+ OMAP_TIMER_CTRL_POSTED, 0);
+}
+
+static inline int __omap_dm_timer_set_source(struct clk *timer_fck,
+ struct clk *parent)
+{
+ int ret;
+
+ clk_disable(timer_fck);
+ ret = clk_set_parent(timer_fck, parent);
+ clk_enable(timer_fck);
+
+ /*
+ * When the functional clock disappears, writes that follow too quickly
+ * seem to cause an abort. XXX Is this still necessary?
+ */
+ __delay(300000);
+
+ return ret;
+}
+
+static inline void __omap_dm_timer_stop(void __iomem *base, int posted,
+ unsigned long rate)
+{
+ u32 l;
+
+ l = __omap_dm_timer_read(base, OMAP_TIMER_CTRL_REG, posted);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ l &= ~0x1;
+ __omap_dm_timer_write(base, OMAP_TIMER_CTRL_REG, l, posted);
+#ifdef CONFIG_ARCH_OMAP2PLUS
+ /* Readback to make sure write has completed */
+ __omap_dm_timer_read(base, OMAP_TIMER_CTRL_REG, posted);
+ /*
+ * Wait for functional clock period x 3.5 to make sure that
+ * timer is stopped
+ */
+ udelay(3500000 / rate + 1);
+#endif
+ }
+
+ /* Ack possibly pending interrupt */
+ __omap_dm_timer_write(base, OMAP_TIMER_STAT_REG,
+ OMAP_TIMER_INT_OVERFLOW, 0);
+}
+
+static inline void __omap_dm_timer_load_start(void __iomem *base, u32 ctrl,
+ unsigned int load, int posted)
+{
+ __omap_dm_timer_write(base, OMAP_TIMER_COUNTER_REG, load, posted);
+ __omap_dm_timer_write(base, OMAP_TIMER_CTRL_REG, ctrl, posted);
+}
+
+static inline void __omap_dm_timer_int_enable(void __iomem *base,
+ unsigned int value)
+{
+ __omap_dm_timer_write(base, OMAP_TIMER_INT_EN_REG, value, 0);
+ __omap_dm_timer_write(base, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+}
+
+static inline unsigned int __omap_dm_timer_read_counter(void __iomem *base,
+ int posted)
+{
+ return __omap_dm_timer_read(base, OMAP_TIMER_COUNTER_REG, posted);
+}
+
+static inline void __omap_dm_timer_write_status(void __iomem *base,
+ unsigned int value)
+{
+ __omap_dm_timer_write(base, OMAP_TIMER_STAT_REG, value, 0);
+}
#endif /* __ASM_ARCH_DMTIMER_H */
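Each OMAP_TIMER_*_REG define above packs the register's byte offset into the low byte and its write-pending (TWPS) bit into the upper half via WPSHIFT; the posted-mode accessors mask them back apart. Unrolled for a single register, the decode looks like this (illustration only, equivalent to __omap_dm_timer_write() for TLDR):

static inline void example_posted_load_write(void __iomem *base, u32 val)
{
        u32 reg = OMAP_TIMER_LOAD_REG;  /* 0x2c | (WP_TLDR << WPSHIFT) */

        /* Posted mode: wait until the previous write to TLDR has retired. */
        while (__raw_readl(base + (OMAP_TIMER_WRITE_PEND_REG & 0xff))
                        & (reg >> WPSHIFT))
                cpu_relax();

        __raw_writel(val, base + (reg & 0xff)); /* plain offset in low byte */
}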
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 5a25098ea7ea..c88432005665 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -428,7 +428,11 @@
#define INTCPS_NR_IRQS 96
#ifndef __ASSEMBLY__
-extern void omap_init_irq(void);
+extern void __iomem *omap_irq_base;
+void omap1_init_irq(void);
+void omap2_init_irq(void);
+void omap3_init_irq(void);
+void ti816x_init_irq(void);
extern int omap_irq_pending(void);
void omap_intc_save_context(void);
void omap_intc_restore_context(void);
diff --git a/arch/arm/plat-omap/include/plat/mcbsp.h b/arch/arm/plat-omap/include/plat/mcbsp.h
index f8f690ab2997..9882c657b2d4 100644
--- a/arch/arm/plat-omap/include/plat/mcbsp.h
+++ b/arch/arm/plat-omap/include/plat/mcbsp.h
@@ -24,7 +24,6 @@
#ifndef __ASM_ARCH_OMAP_MCBSP_H
#define __ASM_ARCH_OMAP_MCBSP_H
-#include <linux/completion.h>
#include <linux/spinlock.h>
#include <mach/hardware.h>
@@ -34,7 +33,7 @@
#define OMAP_MCBSP_PLATFORM_DEVICE(port_nr) \
static struct platform_device omap_mcbsp##port_nr = { \
.name = "omap-mcbsp-dai", \
- .id = OMAP_MCBSP##port_nr, \
+ .id = port_nr - 1, \
}
#define MCBSP_CONFIG_TYPE2 0x2
@@ -333,18 +332,6 @@ struct omap_mcbsp_reg_cfg {
};
typedef enum {
- OMAP_MCBSP1 = 0,
- OMAP_MCBSP2,
- OMAP_MCBSP3,
- OMAP_MCBSP4,
- OMAP_MCBSP5
-} omap_mcbsp_id;
-
-typedef int __bitwise omap_mcbsp_io_type_t;
-#define OMAP_MCBSP_IRQ_IO ((__force omap_mcbsp_io_type_t) 1)
-#define OMAP_MCBSP_POLL_IO ((__force omap_mcbsp_io_type_t) 2)
-
-typedef enum {
OMAP_MCBSP_WORD_8 = 0,
OMAP_MCBSP_WORD_12,
OMAP_MCBSP_WORD_16,
@@ -353,38 +340,6 @@ typedef enum {
OMAP_MCBSP_WORD_32,
} omap_mcbsp_word_length;
-typedef enum {
- OMAP_MCBSP_CLK_RISING = 0,
- OMAP_MCBSP_CLK_FALLING,
-} omap_mcbsp_clk_polarity;
-
-typedef enum {
- OMAP_MCBSP_FS_ACTIVE_HIGH = 0,
- OMAP_MCBSP_FS_ACTIVE_LOW,
-} omap_mcbsp_fs_polarity;
-
-typedef enum {
- OMAP_MCBSP_CLK_STP_MODE_NO_DELAY = 0,
- OMAP_MCBSP_CLK_STP_MODE_DELAY,
-} omap_mcbsp_clk_stp_mode;
-
-
-/******* SPI specific mode **********/
-typedef enum {
- OMAP_MCBSP_SPI_MASTER = 0,
- OMAP_MCBSP_SPI_SLAVE,
-} omap_mcbsp_spi_mode;
-
-struct omap_mcbsp_spi_cfg {
- omap_mcbsp_spi_mode spi_mode;
- omap_mcbsp_clk_polarity rx_clock_polarity;
- omap_mcbsp_clk_polarity tx_clock_polarity;
- omap_mcbsp_fs_polarity fsx_polarity;
- u8 clk_div;
- omap_mcbsp_clk_stp_mode clk_stp_mode;
- omap_mcbsp_word_length word_length;
-};
-
/* Platform specific configuration */
struct omap_mcbsp_ops {
void (*request)(unsigned int);
@@ -422,25 +377,13 @@ struct omap_mcbsp {
void __iomem *io_base;
u8 id;
u8 free;
- omap_mcbsp_word_length rx_word_length;
- omap_mcbsp_word_length tx_word_length;
- omap_mcbsp_io_type_t io_type; /* IRQ or poll */
- /* IRQ based TX/RX */
int rx_irq;
int tx_irq;
/* DMA stuff */
u8 dma_rx_sync;
- short dma_rx_lch;
u8 dma_tx_sync;
- short dma_tx_lch;
-
- /* Completion queues */
- struct completion tx_irq_completion;
- struct completion rx_irq_completion;
- struct completion tx_dma_completion;
- struct completion rx_dma_completion;
/* Protect the field .free, while checking if the mcbsp is in use */
spinlock_t lock;
@@ -499,24 +442,9 @@ int omap_mcbsp_request(unsigned int id);
void omap_mcbsp_free(unsigned int id);
void omap_mcbsp_start(unsigned int id, int tx, int rx);
void omap_mcbsp_stop(unsigned int id, int tx, int rx);
-void omap_mcbsp_xmit_word(unsigned int id, u32 word);
-u32 omap_mcbsp_recv_word(unsigned int id);
-
-int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int length);
-int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int length);
-int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word);
-int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 * word);
-
/* McBSP functional clock source changing function */
extern int omap2_mcbsp_set_clks_src(u8 id, u8 fck_src_id);
-/* SPI specific API */
-void omap_mcbsp_set_spi_mode(unsigned int id, const struct omap_mcbsp_spi_cfg * spi_cfg);
-
-/* Polled read/write functions */
-int omap_mcbsp_pollread(unsigned int id, u16 * buf);
-int omap_mcbsp_pollwrite(unsigned int id, u16 buf);
-int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type);
/* McBSP signal muxing API */
void omap2_mcbsp1_mux_clkr_src(u8 mux);
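With the omap_mcbsp_id enum removed, OMAP_MCBSP_PLATFORM_DEVICE() derives the platform device id directly from the 1-based port number. For example, OMAP_MCBSP_PLATFORM_DEVICE(2) now expands to roughly:

static struct platform_device omap_mcbsp2 = {
        .name   = "omap-mcbsp-dai",
        .id     = 1,            /* port_nr - 1; previously OMAP_MCBSP2 */
};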
diff --git a/arch/arm/plat-omap/include/plat/nand.h b/arch/arm/plat-omap/include/plat/nand.h
index d86d1ecf0068..67fc5060183e 100644
--- a/arch/arm/plat-omap/include/plat/nand.h
+++ b/arch/arm/plat-omap/include/plat/nand.h
@@ -19,15 +19,11 @@ enum nand_io {
};
struct omap_nand_platform_data {
- unsigned int options;
int cs;
- int gpio_irq;
struct mtd_partition *parts;
struct gpmc_timings *gpmc_t;
int nr_parts;
- int (*nand_setup)(void);
- int (*dev_ready)(struct omap_nand_platform_data *);
- int dma_channel;
+ bool dev_ready;
int gpmc_irq;
enum nand_io xfer_type;
unsigned long phys_base;
diff --git a/arch/arm/plat-omap/include/plat/omap-pm.h b/arch/arm/plat-omap/include/plat/omap-pm.h
index c0a752053039..0840df813f4f 100644
--- a/arch/arm/plat-omap/include/plat/omap-pm.h
+++ b/arch/arm/plat-omap/include/plat/omap-pm.h
@@ -40,11 +40,7 @@
* framework starts. The "_if_" is to avoid name collisions with the
* PM idle-loop code.
*/
-#ifdef CONFIG_OMAP_PM_NONE
-#define omap_pm_if_early_init() 0
-#else
int __init omap_pm_if_early_init(void);
-#endif
/**
* omap_pm_if_init - OMAP PM init code called after clock fw init
@@ -52,11 +48,7 @@ int __init omap_pm_if_early_init(void);
* The main initialization code. OPP tables are passed in here. The
* "_if_" is to avoid name collisions with the PM idle-loop code.
*/
-#ifdef CONFIG_OMAP_PM_NONE
-#define omap_pm_if_init() 0
-#else
int __init omap_pm_if_init(void);
-#endif
/**
* omap_pm_if_exit - OMAP PM exit code
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 1adea9c62984..ce06ac6a9709 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -77,7 +77,6 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
#define HWMOD_IDLEMODE_FORCE (1 << 0)
#define HWMOD_IDLEMODE_NO (1 << 1)
#define HWMOD_IDLEMODE_SMART (1 << 2)
-/* Slave idle mode flag only */
#define HWMOD_IDLEMODE_SMART_WKUP (1 << 3)
/**
@@ -98,7 +97,7 @@ struct omap_hwmod_mux_info {
/**
* struct omap_hwmod_irq_info - MPU IRQs used by the hwmod
* @name: name of the IRQ channel (module local name)
- * @irq_ch: IRQ channel ID
+ * @irq: IRQ channel ID (should be non-negative except -1 = terminator)
*
* @name should be something short, e.g., "tx" or "rx". It is for use
* by platform_get_resource_byname(). It is defined locally to the
@@ -106,13 +105,13 @@ struct omap_hwmod_mux_info {
*/
struct omap_hwmod_irq_info {
const char *name;
- u16 irq;
+ s16 irq;
};
/**
* struct omap_hwmod_dma_info - DMA channels used by the hwmod
* @name: name of the DMA channel (module local name)
- * @dma_req: DMA request ID
+ * @dma_req: DMA request ID (should be non-negative except -1 = terminator)
*
* @name should be something short, e.g., "tx" or "rx". It is for use
* by platform_get_resource_byname(). It is defined locally to the
@@ -120,7 +119,7 @@ struct omap_hwmod_irq_info {
*/
struct omap_hwmod_dma_info {
const char *name;
- u16 dma_req;
+ s16 dma_req;
};
/**
@@ -220,7 +219,6 @@ struct omap_hwmod_addr_space {
* @clk: interface clock: OMAP clock name
* @_clk: pointer to the interface struct clk (filled in at runtime)
* @fw: interface firewall data
- * @addr_cnt: ARRAY_SIZE(@addr)
* @width: OCP data width
* @user: initiators using this interface (see OCP_USER_* macros above)
* @flags: OCP interface flags (see OCPIF_* macros above)
@@ -239,7 +237,6 @@ struct omap_hwmod_ocp_if {
union {
struct omap_hwmod_omap2_firewall omap2;
} fw;
- u8 addr_cnt;
u8 width;
u8 user;
u8 flags;
@@ -258,6 +255,7 @@ struct omap_hwmod_ocp_if {
#define MSTANDBY_FORCE (HWMOD_IDLEMODE_FORCE << MASTER_STANDBY_SHIFT)
#define MSTANDBY_NO (HWMOD_IDLEMODE_NO << MASTER_STANDBY_SHIFT)
#define MSTANDBY_SMART (HWMOD_IDLEMODE_SMART << MASTER_STANDBY_SHIFT)
+#define MSTANDBY_SMART_WKUP (HWMOD_IDLEMODE_SMART_WKUP << MASTER_STANDBY_SHIFT)
/* omap_hwmod_sysconfig.sysc_flags capability flags */
#define SYSC_HAS_AUTOIDLE (1 << 0)
@@ -468,8 +466,8 @@ struct omap_hwmod_class {
* @name: name of the hwmod
* @class: struct omap_hwmod_class * to the class of this hwmod
* @od: struct omap_device currently associated with this hwmod (internal use)
- * @mpu_irqs: ptr to an array of MPU IRQs (see also mpu_irqs_cnt)
- * @sdma_reqs: ptr to an array of System DMA request IDs (see sdma_reqs_cnt)
+ * @mpu_irqs: ptr to an array of MPU IRQs
+ * @sdma_reqs: ptr to an array of System DMA request IDs
* @prcm: PRCM data pertaining to this hwmod
* @main_clk: main clock: OMAP clock name
* @_clk: pointer to the main struct clk (filled in at runtime)
@@ -482,8 +480,6 @@ struct omap_hwmod_class {
* @_sysc_cache: internal-use hwmod flags
* @_mpu_rt_va: cached register target start address (internal use)
* @_mpu_port_index: cached MPU register target slave ID (internal use)
- * @mpu_irqs_cnt: number of @mpu_irqs
- * @sdma_reqs_cnt: number of @sdma_reqs
* @opt_clks_cnt: number of @opt_clks
* @master_cnt: number of @master entries
* @slaves_cnt: number of @slave entries
@@ -531,8 +527,6 @@ struct omap_hwmod {
u16 flags;
u8 _mpu_port_index;
u8 response_lat;
- u8 mpu_irqs_cnt;
- u8 sdma_reqs_cnt;
u8 rst_lines_cnt;
u8 opt_clks_cnt;
u8 masters_cnt;
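Because mpu_irqs_cnt and sdma_reqs_cnt are dropped, the per-hwmod IRQ and DMA arrays are expected to end with a -1 sentinel, which is why the fields widen from u16 to s16. A hypothetical hwmod data fragment and counting loop (entries and helper name invented for illustration):

static struct omap_hwmod_irq_info example_mpu_irqs[] = {
        { .name = "tx", .irq = 62 },
        { .name = "rx", .irq = 63 },
        { .irq = -1 },                  /* sentinel replaces mpu_irqs_cnt */
};

static int example_count_irqs(const struct omap_hwmod_irq_info *irqs)
{
        int i = 0;

        while (irqs && irqs[i].irq != -1)
                i++;
        return i;
}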
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 5587acf0eb2c..3c1fbdc92468 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -16,8 +16,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/wait.h>
-#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -25,7 +23,6 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <plat/dma.h>
#include <plat/mcbsp.h>
#include <plat/omap_device.h>
#include <linux/pm_runtime.h>
@@ -136,8 +133,6 @@ static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
irqst_spcr2);
/* Writing zero to XSYNC_ERR clears the IRQ */
MCBSP_WRITE(mcbsp_tx, SPCR2, MCBSP_READ_CACHE(mcbsp_tx, SPCR2));
- } else {
- complete(&mcbsp_tx->tx_irq_completion);
}
return IRQ_HANDLED;
@@ -156,41 +151,11 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
irqst_spcr1);
/* Writing zero to RSYNC_ERR clears the IRQ */
MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
- } else {
- complete(&mcbsp_rx->rx_irq_completion);
}
return IRQ_HANDLED;
}
-static void omap_mcbsp_tx_dma_callback(int lch, u16 ch_status, void *data)
-{
- struct omap_mcbsp *mcbsp_dma_tx = data;
-
- dev_dbg(mcbsp_dma_tx->dev, "TX DMA callback : 0x%x\n",
- MCBSP_READ(mcbsp_dma_tx, SPCR2));
-
- /* We can free the channels */
- omap_free_dma(mcbsp_dma_tx->dma_tx_lch);
- mcbsp_dma_tx->dma_tx_lch = -1;
-
- complete(&mcbsp_dma_tx->tx_dma_completion);
-}
-
-static void omap_mcbsp_rx_dma_callback(int lch, u16 ch_status, void *data)
-{
- struct omap_mcbsp *mcbsp_dma_rx = data;
-
- dev_dbg(mcbsp_dma_rx->dev, "RX DMA callback : 0x%x\n",
- MCBSP_READ(mcbsp_dma_rx, SPCR2));
-
- /* We can free the channels */
- omap_free_dma(mcbsp_dma_rx->dma_rx_lch);
- mcbsp_dma_rx->dma_rx_lch = -1;
-
- complete(&mcbsp_dma_rx->rx_dma_completion);
-}
-
/*
* omap_mcbsp_config simply writes a config to the
* appropriate McBSP.
@@ -758,37 +723,6 @@ static inline void omap_st_start(struct omap_mcbsp *mcbsp) {}
static inline void omap_st_stop(struct omap_mcbsp *mcbsp) {}
#endif
-/*
- * We can choose between IRQ based or polled IO.
- * This needs to be called before omap_mcbsp_request().
- */
-int omap_mcbsp_set_io_type(unsigned int id, omap_mcbsp_io_type_t io_type)
-{
- struct omap_mcbsp *mcbsp;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return -ENODEV;
- }
- mcbsp = id_to_mcbsp_ptr(id);
-
- spin_lock(&mcbsp->lock);
-
- if (!mcbsp->free) {
- dev_err(mcbsp->dev, "McBSP%d is currently in use\n",
- mcbsp->id);
- spin_unlock(&mcbsp->lock);
- return -EINVAL;
- }
-
- mcbsp->io_type = io_type;
-
- spin_unlock(&mcbsp->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_set_io_type);
-
int omap_mcbsp_request(unsigned int id)
{
struct omap_mcbsp *mcbsp;
@@ -833,29 +767,24 @@ int omap_mcbsp_request(unsigned int id)
MCBSP_WRITE(mcbsp, SPCR1, 0);
MCBSP_WRITE(mcbsp, SPCR2, 0);
- if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) {
- /* We need to get IRQs here */
- init_completion(&mcbsp->tx_irq_completion);
- err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
- 0, "McBSP", (void *)mcbsp);
+ err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
+ 0, "McBSP", (void *)mcbsp);
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
+ "for McBSP%d\n", mcbsp->tx_irq,
+ mcbsp->id);
+ goto err_clk_disable;
+ }
+
+ if (mcbsp->rx_irq) {
+ err = request_irq(mcbsp->rx_irq,
+ omap_mcbsp_rx_irq_handler,
+ 0, "McBSP", (void *)mcbsp);
if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
- "for McBSP%d\n", mcbsp->tx_irq,
+ dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
+ "for McBSP%d\n", mcbsp->rx_irq,
mcbsp->id);
- goto err_clk_disable;
- }
-
- if (mcbsp->rx_irq) {
- init_completion(&mcbsp->rx_irq_completion);
- err = request_irq(mcbsp->rx_irq,
- omap_mcbsp_rx_irq_handler,
- 0, "McBSP", (void *)mcbsp);
- if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
- "for McBSP%d\n", mcbsp->rx_irq,
- mcbsp->id);
- goto err_free_irq;
- }
+ goto err_free_irq;
}
}
@@ -901,12 +830,9 @@ void omap_mcbsp_free(unsigned int id)
pm_runtime_put_sync(mcbsp->dev);
- if (mcbsp->io_type == OMAP_MCBSP_IRQ_IO) {
- /* Free IRQs */
- if (mcbsp->rx_irq)
- free_irq(mcbsp->rx_irq, (void *)mcbsp);
- free_irq(mcbsp->tx_irq, (void *)mcbsp);
- }
+ if (mcbsp->rx_irq)
+ free_irq(mcbsp->rx_irq, (void *)mcbsp);
+ free_irq(mcbsp->tx_irq, (void *)mcbsp);
reg_cache = mcbsp->reg_cache;
@@ -943,9 +869,6 @@ void omap_mcbsp_start(unsigned int id, int tx, int rx)
if (cpu_is_omap34xx())
omap_st_start(mcbsp);
- mcbsp->rx_word_length = (MCBSP_READ_CACHE(mcbsp, RCR1) >> 5) & 0x7;
- mcbsp->tx_word_length = (MCBSP_READ_CACHE(mcbsp, XCR1) >> 5) & 0x7;
-
/* Only enable SRG, if McBSP is master */
w = MCBSP_READ_CACHE(mcbsp, PCR0);
if (w & (FSXM | FSRM | CLKXM | CLKRM))
@@ -1043,485 +966,6 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx)
}
EXPORT_SYMBOL(omap_mcbsp_stop);
-/* polled mcbsp i/o operations */
-int omap_mcbsp_pollwrite(unsigned int id, u16 buf)
-{
- struct omap_mcbsp *mcbsp;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return -ENODEV;
- }
-
- mcbsp = id_to_mcbsp_ptr(id);
-
- MCBSP_WRITE(mcbsp, DXR1, buf);
- /* if frame sync error - clear the error */
- if (MCBSP_READ(mcbsp, SPCR2) & XSYNC_ERR) {
- /* clear error */
- MCBSP_WRITE(mcbsp, SPCR2, MCBSP_READ_CACHE(mcbsp, SPCR2));
- /* resend */
- return -1;
- } else {
- /* wait for transmit confirmation */
- int attemps = 0;
- while (!(MCBSP_READ(mcbsp, SPCR2) & XRDY)) {
- if (attemps++ > 1000) {
- MCBSP_WRITE(mcbsp, SPCR2,
- MCBSP_READ_CACHE(mcbsp, SPCR2) &
- (~XRST));
- udelay(10);
- MCBSP_WRITE(mcbsp, SPCR2,
- MCBSP_READ_CACHE(mcbsp, SPCR2) |
- (XRST));
- udelay(10);
- dev_err(mcbsp->dev, "Could not write to"
- " McBSP%d Register\n", mcbsp->id);
- return -2;
- }
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_pollwrite);
-
-int omap_mcbsp_pollread(unsigned int id, u16 *buf)
-{
- struct omap_mcbsp *mcbsp;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return -ENODEV;
- }
- mcbsp = id_to_mcbsp_ptr(id);
-
- /* if frame sync error - clear the error */
- if (MCBSP_READ(mcbsp, SPCR1) & RSYNC_ERR) {
- /* clear error */
- MCBSP_WRITE(mcbsp, SPCR1, MCBSP_READ_CACHE(mcbsp, SPCR1));
- /* resend */
- return -1;
- } else {
- /* wait for receive confirmation */
- int attemps = 0;
- while (!(MCBSP_READ(mcbsp, SPCR1) & RRDY)) {
- if (attemps++ > 1000) {
- MCBSP_WRITE(mcbsp, SPCR1,
- MCBSP_READ_CACHE(mcbsp, SPCR1) &
- (~RRST));
- udelay(10);
- MCBSP_WRITE(mcbsp, SPCR1,
- MCBSP_READ_CACHE(mcbsp, SPCR1) |
- (RRST));
- udelay(10);
- dev_err(mcbsp->dev, "Could not read from"
- " McBSP%d Register\n", mcbsp->id);
- return -2;
- }
- }
- }
- *buf = MCBSP_READ(mcbsp, DRR1);
-
- return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_pollread);
-
-/*
- * IRQ based word transmission.
- */
-void omap_mcbsp_xmit_word(unsigned int id, u32 word)
-{
- struct omap_mcbsp *mcbsp;
- omap_mcbsp_word_length word_length;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return;
- }
-
- mcbsp = id_to_mcbsp_ptr(id);
- word_length = mcbsp->tx_word_length;
-
- wait_for_completion(&mcbsp->tx_irq_completion);
-
- if (word_length > OMAP_MCBSP_WORD_16)
- MCBSP_WRITE(mcbsp, DXR2, word >> 16);
- MCBSP_WRITE(mcbsp, DXR1, word & 0xffff);
-}
-EXPORT_SYMBOL(omap_mcbsp_xmit_word);
-
-u32 omap_mcbsp_recv_word(unsigned int id)
-{
- struct omap_mcbsp *mcbsp;
- u16 word_lsb, word_msb = 0;
- omap_mcbsp_word_length word_length;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return -ENODEV;
- }
- mcbsp = id_to_mcbsp_ptr(id);
-
- word_length = mcbsp->rx_word_length;
-
- wait_for_completion(&mcbsp->rx_irq_completion);
-
- if (word_length > OMAP_MCBSP_WORD_16)
- word_msb = MCBSP_READ(mcbsp, DRR2);
- word_lsb = MCBSP_READ(mcbsp, DRR1);
-
- return (word_lsb | (word_msb << 16));
-}
-EXPORT_SYMBOL(omap_mcbsp_recv_word);
-
-int omap_mcbsp_spi_master_xmit_word_poll(unsigned int id, u32 word)
-{
- struct omap_mcbsp *mcbsp;
- omap_mcbsp_word_length tx_word_length;
- omap_mcbsp_word_length rx_word_length;
- u16 spcr2, spcr1, attempts = 0, word_lsb, word_msb = 0;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return -ENODEV;
- }
- mcbsp = id_to_mcbsp_ptr(id);
- tx_word_length = mcbsp->tx_word_length;
- rx_word_length = mcbsp->rx_word_length;
-
- if (tx_word_length != rx_word_length)
- return -EINVAL;
-
- /* First we wait for the transmitter to be ready */
- spcr2 = MCBSP_READ(mcbsp, SPCR2);
- while (!(spcr2 & XRDY)) {
- spcr2 = MCBSP_READ(mcbsp, SPCR2);
- if (attempts++ > 1000) {
- /* We must reset the transmitter */
- MCBSP_WRITE(mcbsp, SPCR2,
- MCBSP_READ_CACHE(mcbsp, SPCR2) & (~XRST));
- udelay(10);
- MCBSP_WRITE(mcbsp, SPCR2,
- MCBSP_READ_CACHE(mcbsp, SPCR2) | XRST);
- udelay(10);
- dev_err(mcbsp->dev, "McBSP%d transmitter not "
- "ready\n", mcbsp->id);
- return -EAGAIN;
- }
- }
-
- /* Now we can push the data */
- if (tx_word_length > OMAP_MCBSP_WORD_16)
- MCBSP_WRITE(mcbsp, DXR2, word >> 16);
- MCBSP_WRITE(mcbsp, DXR1, word & 0xffff);
-
- /* We wait for the receiver to be ready */
- spcr1 = MCBSP_READ(mcbsp, SPCR1);
- while (!(spcr1 & RRDY)) {
- spcr1 = MCBSP_READ(mcbsp, SPCR1);
- if (attempts++ > 1000) {
- /* We must reset the receiver */
- MCBSP_WRITE(mcbsp, SPCR1,
- MCBSP_READ_CACHE(mcbsp, SPCR1) & (~RRST));
- udelay(10);
- MCBSP_WRITE(mcbsp, SPCR1,
- MCBSP_READ_CACHE(mcbsp, SPCR1) | RRST);
- udelay(10);
- dev_err(mcbsp->dev, "McBSP%d receiver not "
- "ready\n", mcbsp->id);
- return -EAGAIN;
- }
- }
-
- /* Receiver is ready, let's read the dummy data */
- if (rx_word_length > OMAP_MCBSP_WORD_16)
- word_msb = MCBSP_READ(mcbsp, DRR2);
- word_lsb = MCBSP_READ(mcbsp, DRR1);
-
- return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_spi_master_xmit_word_poll);
-
-int omap_mcbsp_spi_master_recv_word_poll(unsigned int id, u32 *word)
-{
- struct omap_mcbsp *mcbsp;
- u32 clock_word = 0;
- omap_mcbsp_word_length tx_word_length;
- omap_mcbsp_word_length rx_word_length;
- u16 spcr2, spcr1, attempts = 0, word_lsb, word_msb = 0;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return -ENODEV;
- }
-
- mcbsp = id_to_mcbsp_ptr(id);
-
- tx_word_length = mcbsp->tx_word_length;
- rx_word_length = mcbsp->rx_word_length;
-
- if (tx_word_length != rx_word_length)
- return -EINVAL;
-
- /* First we wait for the transmitter to be ready */
- spcr2 = MCBSP_READ(mcbsp, SPCR2);
- while (!(spcr2 & XRDY)) {
- spcr2 = MCBSP_READ(mcbsp, SPCR2);
- if (attempts++ > 1000) {
- /* We must reset the transmitter */
- MCBSP_WRITE(mcbsp, SPCR2,
- MCBSP_READ_CACHE(mcbsp, SPCR2) & (~XRST));
- udelay(10);
- MCBSP_WRITE(mcbsp, SPCR2,
- MCBSP_READ_CACHE(mcbsp, SPCR2) | XRST);
- udelay(10);
- dev_err(mcbsp->dev, "McBSP%d transmitter not "
- "ready\n", mcbsp->id);
- return -EAGAIN;
- }
- }
-
- /* We first need to enable the bus clock */
- if (tx_word_length > OMAP_MCBSP_WORD_16)
- MCBSP_WRITE(mcbsp, DXR2, clock_word >> 16);
- MCBSP_WRITE(mcbsp, DXR1, clock_word & 0xffff);
-
- /* We wait for the receiver to be ready */
- spcr1 = MCBSP_READ(mcbsp, SPCR1);
- while (!(spcr1 & RRDY)) {
- spcr1 = MCBSP_READ(mcbsp, SPCR1);
- if (attempts++ > 1000) {
- /* We must reset the receiver */
- MCBSP_WRITE(mcbsp, SPCR1,
- MCBSP_READ_CACHE(mcbsp, SPCR1) & (~RRST));
- udelay(10);
- MCBSP_WRITE(mcbsp, SPCR1,
- MCBSP_READ_CACHE(mcbsp, SPCR1) | RRST);
- udelay(10);
- dev_err(mcbsp->dev, "McBSP%d receiver not "
- "ready\n", mcbsp->id);
- return -EAGAIN;
- }
- }
-
- /* Receiver is ready, there is something for us */
- if (rx_word_length > OMAP_MCBSP_WORD_16)
- word_msb = MCBSP_READ(mcbsp, DRR2);
- word_lsb = MCBSP_READ(mcbsp, DRR1);
-
- word[0] = (word_lsb | (word_msb << 16));
-
- return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_spi_master_recv_word_poll);
-
-/*
- * Simple DMA based buffer rx/tx routines.
- * Nothing fancy, just a single buffer tx/rx through DMA.
- * The DMA resources are released once the transfer is done.
- * For anything fancier, you should use your own customized DMA
- * routines and callbacks.
- */
-int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer,
- unsigned int length)
-{
- struct omap_mcbsp *mcbsp;
- int dma_tx_ch;
- int src_port = 0;
- int dest_port = 0;
- int sync_dev = 0;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return -ENODEV;
- }
- mcbsp = id_to_mcbsp_ptr(id);
-
- if (omap_request_dma(mcbsp->dma_tx_sync, "McBSP TX",
- omap_mcbsp_tx_dma_callback,
- mcbsp,
- &dma_tx_ch)) {
- dev_err(mcbsp->dev, " Unable to request DMA channel for "
- "McBSP%d TX. Trying IRQ based TX\n",
- mcbsp->id);
- return -EAGAIN;
- }
- mcbsp->dma_tx_lch = dma_tx_ch;
-
- dev_err(mcbsp->dev, "McBSP%d TX DMA on channel %d\n", mcbsp->id,
- dma_tx_ch);
-
- init_completion(&mcbsp->tx_dma_completion);
-
- if (cpu_class_is_omap1()) {
- src_port = OMAP_DMA_PORT_TIPB;
- dest_port = OMAP_DMA_PORT_EMIFF;
- }
- if (cpu_class_is_omap2())
- sync_dev = mcbsp->dma_tx_sync;
-
- omap_set_dma_transfer_params(mcbsp->dma_tx_lch,
- OMAP_DMA_DATA_TYPE_S16,
- length >> 1, 1,
- OMAP_DMA_SYNC_ELEMENT,
- sync_dev, 0);
-
- omap_set_dma_dest_params(mcbsp->dma_tx_lch,
- src_port,
- OMAP_DMA_AMODE_CONSTANT,
- mcbsp->phys_base + OMAP_MCBSP_REG_DXR1,
- 0, 0);
-
- omap_set_dma_src_params(mcbsp->dma_tx_lch,
- dest_port,
- OMAP_DMA_AMODE_POST_INC,
- buffer,
- 0, 0);
-
- omap_start_dma(mcbsp->dma_tx_lch);
- wait_for_completion(&mcbsp->tx_dma_completion);
-
- return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_xmit_buffer);
-
-int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer,
- unsigned int length)
-{
- struct omap_mcbsp *mcbsp;
- int dma_rx_ch;
- int src_port = 0;
- int dest_port = 0;
- int sync_dev = 0;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return -ENODEV;
- }
- mcbsp = id_to_mcbsp_ptr(id);
-
- if (omap_request_dma(mcbsp->dma_rx_sync, "McBSP RX",
- omap_mcbsp_rx_dma_callback,
- mcbsp,
- &dma_rx_ch)) {
- dev_err(mcbsp->dev, "Unable to request DMA channel for "
- "McBSP%d RX. Trying IRQ based RX\n",
- mcbsp->id);
- return -EAGAIN;
- }
- mcbsp->dma_rx_lch = dma_rx_ch;
-
- dev_err(mcbsp->dev, "McBSP%d RX DMA on channel %d\n", mcbsp->id,
- dma_rx_ch);
-
- init_completion(&mcbsp->rx_dma_completion);
-
- if (cpu_class_is_omap1()) {
- src_port = OMAP_DMA_PORT_TIPB;
- dest_port = OMAP_DMA_PORT_EMIFF;
- }
- if (cpu_class_is_omap2())
- sync_dev = mcbsp->dma_rx_sync;
-
- omap_set_dma_transfer_params(mcbsp->dma_rx_lch,
- OMAP_DMA_DATA_TYPE_S16,
- length >> 1, 1,
- OMAP_DMA_SYNC_ELEMENT,
- sync_dev, 0);
-
- omap_set_dma_src_params(mcbsp->dma_rx_lch,
- src_port,
- OMAP_DMA_AMODE_CONSTANT,
- mcbsp->phys_base + OMAP_MCBSP_REG_DRR1,
- 0, 0);
-
- omap_set_dma_dest_params(mcbsp->dma_rx_lch,
- dest_port,
- OMAP_DMA_AMODE_POST_INC,
- buffer,
- 0, 0);
-
- omap_start_dma(mcbsp->dma_rx_lch);
- wait_for_completion(&mcbsp->rx_dma_completion);
-
- return 0;
-}
-EXPORT_SYMBOL(omap_mcbsp_recv_buffer);
-
-/*
- * SPI wrapper.
- * Since SPI setup is much simpler than the generic McBSP one,
- * this wrapper just need an omap_mcbsp_spi_cfg structure as an input.
- * Once this is done, you can call omap_mcbsp_start().
- */
-void omap_mcbsp_set_spi_mode(unsigned int id,
- const struct omap_mcbsp_spi_cfg *spi_cfg)
-{
- struct omap_mcbsp *mcbsp;
- struct omap_mcbsp_reg_cfg mcbsp_cfg;
-
- if (!omap_mcbsp_check_valid_id(id)) {
- printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
- return;
- }
- mcbsp = id_to_mcbsp_ptr(id);
-
- memset(&mcbsp_cfg, 0, sizeof(struct omap_mcbsp_reg_cfg));
-
- /* SPI has only one frame */
- mcbsp_cfg.rcr1 |= (RWDLEN1(spi_cfg->word_length) | RFRLEN1(0));
- mcbsp_cfg.xcr1 |= (XWDLEN1(spi_cfg->word_length) | XFRLEN1(0));
-
- /* Clock stop mode */
- if (spi_cfg->clk_stp_mode == OMAP_MCBSP_CLK_STP_MODE_NO_DELAY)
- mcbsp_cfg.spcr1 |= (1 << 12);
- else
- mcbsp_cfg.spcr1 |= (3 << 11);
-
- /* Set clock parities */
- if (spi_cfg->rx_clock_polarity == OMAP_MCBSP_CLK_RISING)
- mcbsp_cfg.pcr0 |= CLKRP;
- else
- mcbsp_cfg.pcr0 &= ~CLKRP;
-
- if (spi_cfg->tx_clock_polarity == OMAP_MCBSP_CLK_RISING)
- mcbsp_cfg.pcr0 &= ~CLKXP;
- else
- mcbsp_cfg.pcr0 |= CLKXP;
-
- /* Set SCLKME to 0 and CLKSM to 1 */
- mcbsp_cfg.pcr0 &= ~SCLKME;
- mcbsp_cfg.srgr2 |= CLKSM;
-
- /* Set FSXP */
- if (spi_cfg->fsx_polarity == OMAP_MCBSP_FS_ACTIVE_HIGH)
- mcbsp_cfg.pcr0 &= ~FSXP;
- else
- mcbsp_cfg.pcr0 |= FSXP;
-
- if (spi_cfg->spi_mode == OMAP_MCBSP_SPI_MASTER) {
- mcbsp_cfg.pcr0 |= CLKXM;
- mcbsp_cfg.srgr1 |= CLKGDV(spi_cfg->clk_div - 1);
- mcbsp_cfg.pcr0 |= FSXM;
- mcbsp_cfg.srgr2 &= ~FSGM;
- mcbsp_cfg.xcr2 |= XDATDLY(1);
- mcbsp_cfg.rcr2 |= RDATDLY(1);
- } else {
- mcbsp_cfg.pcr0 &= ~CLKXM;
- mcbsp_cfg.srgr1 |= CLKGDV(1);
- mcbsp_cfg.pcr0 &= ~FSXM;
- mcbsp_cfg.xcr2 &= ~XDATDLY(3);
- mcbsp_cfg.rcr2 &= ~RDATDLY(3);
- }
-
- mcbsp_cfg.xcr2 &= ~XPHASE;
- mcbsp_cfg.rcr2 &= ~RPHASE;
-
- omap_mcbsp_config(id, &mcbsp_cfg);
-}
-EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
-
#ifdef CONFIG_ARCH_OMAP3
#define max_thres(m) (mcbsp->pdata->buffer_size)
#define valid_threshold(m, val) ((val) <= max_thres(m))
@@ -1833,8 +1277,6 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
spin_lock_init(&mcbsp->lock);
mcbsp->id = id + 1;
mcbsp->free = true;
- mcbsp->dma_tx_lch = -1;
- mcbsp->dma_rx_lch = -1;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
if (!res) {
@@ -1860,9 +1302,6 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
else
mcbsp->phys_dma_base = res->start;
- /* Default I/O is IRQ based */
- mcbsp->io_type = OMAP_MCBSP_IRQ_IO;
-
mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 2526fa312b8a..3471c650743b 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -236,11 +236,6 @@ static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat)
return 0;
}
-static inline struct omap_device *_find_by_pdev(struct platform_device *pdev)
-{
- return container_of(pdev, struct omap_device, pdev);
-}
-
/**
* _add_optional_clock_clkdev - Add clkdev entry for hwmod optional clocks
* @od: struct omap_device *od
@@ -316,7 +311,7 @@ u32 omap_device_get_context_loss_count(struct platform_device *pdev)
struct omap_device *od;
u32 ret = 0;
- od = _find_by_pdev(pdev);
+ od = to_omap_device(pdev);
if (od->hwmods_cnt)
ret = omap_hwmod_get_context_loss_count(od->hwmods[0]);
@@ -654,7 +649,7 @@ int omap_device_enable(struct platform_device *pdev)
int ret;
struct omap_device *od;
- od = _find_by_pdev(pdev);
+ od = to_omap_device(pdev);
if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n",
@@ -693,7 +688,7 @@ int omap_device_idle(struct platform_device *pdev)
int ret;
struct omap_device *od;
- od = _find_by_pdev(pdev);
+ od = to_omap_device(pdev);
if (od->_state != OMAP_DEVICE_STATE_ENABLED) {
WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n",
@@ -724,7 +719,7 @@ int omap_device_shutdown(struct platform_device *pdev)
int ret, i;
struct omap_device *od;
- od = _find_by_pdev(pdev);
+ od = to_omap_device(pdev);
if (od->_state != OMAP_DEVICE_STATE_ENABLED &&
od->_state != OMAP_DEVICE_STATE_IDLE) {
@@ -765,7 +760,7 @@ int omap_device_align_pm_lat(struct platform_device *pdev,
int ret = -EINVAL;
struct omap_device *od;
- od = _find_by_pdev(pdev);
+ od = to_omap_device(pdev);
if (new_wakeup_lat_limit == od->dev_wakeup_lat)
return 0;
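The local _find_by_pdev() helper is dropped in favour of to_omap_device(); presumably the latter is the existing container_of() wrapper from plat/omap_device.h, i.e. equivalent to:

/* Assumed shape of the helper the patch switches to. */
#define to_omap_device(pdev)    container_of((pdev), struct omap_device, pdev)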
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 6af3d0b1f8d0..363c91e44efb 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -394,20 +394,15 @@ void omap3_sram_restore_context(void)
}
#endif /* CONFIG_PM */
-static int __init omap34xx_sram_init(void)
-{
- _omap3_sram_configure_core_dpll =
- omap_sram_push(omap3_sram_configure_core_dpll,
- omap3_sram_configure_core_dpll_sz);
- omap_push_sram_idle();
- return 0;
-}
-#else
+#endif /* CONFIG_ARCH_OMAP3 */
+
static inline int omap34xx_sram_init(void)
{
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
+ omap3_sram_restore_context();
+#endif
return 0;
}
-#endif
int __init omap_sram_init(void)
{
diff --git a/arch/arm/plat-s3c24xx/Kconfig b/arch/arm/plat-s3c24xx/Kconfig
index d9c4096ebf45..8c5b3029b39f 100644
--- a/arch/arm/plat-s3c24xx/Kconfig
+++ b/arch/arm/plat-s3c24xx/Kconfig
@@ -4,7 +4,7 @@
config PLAT_S3C24XX
bool
- depends on ARCH_S3C2410 || ARCH_S3C24A0
+ depends on ARCH_S3C2410
default y
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
diff --git a/arch/arm/plat-s3c24xx/clock-dclk.c b/arch/arm/plat-s3c24xx/clock-dclk.c
index cf97caafe56b..f95d3268ae1f 100644
--- a/arch/arm/plat-s3c24xx/clock-dclk.c
+++ b/arch/arm/plat-s3c24xx/clock-dclk.c
@@ -169,7 +169,6 @@ static struct clk_ops dclk_ops = {
struct clk s3c24xx_dclk0 = {
.name = "dclk0",
- .id = -1,
.ctrlbit = S3C2410_DCLKCON_DCLK0EN,
.enable = s3c24xx_dclk_enable,
.ops = &dclk_ops,
@@ -177,7 +176,6 @@ struct clk s3c24xx_dclk0 = {
struct clk s3c24xx_dclk1 = {
.name = "dclk1",
- .id = -1,
.ctrlbit = S3C2410_DCLKCON_DCLK1EN,
.enable = s3c24xx_dclk_enable,
.ops = &dclk_ops,
@@ -189,12 +187,10 @@ static struct clk_ops clkout_ops = {
struct clk s3c24xx_clkout0 = {
.name = "clkout0",
- .id = -1,
.ops = &clkout_ops,
};
struct clk s3c24xx_clkout1 = {
.name = "clkout1",
- .id = -1,
.ops = &clkout_ops,
};
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 4a10c0f684b2..c1fc6c6fac72 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -46,7 +46,6 @@
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/clock.h>
-#include <plat/s3c2400.h>
#include <plat/s3c2410.h>
#include <plat/s3c2412.h>
#include <plat/s3c2416.h>
@@ -55,7 +54,6 @@
/* table of supported CPUs */
-static const char name_s3c2400[] = "S3C2400";
static const char name_s3c2410[] = "S3C2410";
static const char name_s3c2412[] = "S3C2412";
static const char name_s3c2416[] = "S3C2416/S3C2450";
@@ -157,15 +155,6 @@ static struct cpu_table cpu_ids[] __initdata = {
.init = s3c2443_init,
.name = name_s3c2443,
},
- {
- .idcode = 0x0, /* S3C2400 doesn't have an idcode */
- .idmask = 0xffffffff,
- .map_io = s3c2400_map_io,
- .init_clocks = s3c2400_init_clocks,
- .init_uarts = s3c2400_init_uarts,
- .init = s3c2400_init,
- .name = name_s3c2400
- },
};
/* minimal IO mapping */
@@ -200,11 +189,7 @@ static unsigned long s3c24xx_read_idcode_v5(void)
static unsigned long s3c24xx_read_idcode_v4(void)
{
-#ifndef CONFIG_CPU_S3C2400
return __raw_readl(S3C2410_GSTATUS1);
-#else
- return 0UL;
-#endif
}
/* Hook for arm_pm_restart to ensure we execute the reset code
diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c
index 73667994518a..a76bf2df3333 100644
--- a/arch/arm/plat-s3c24xx/devs.c
+++ b/arch/arm/plat-s3c24xx/devs.c
@@ -150,9 +150,8 @@ void __init s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *pd)
{
struct s3c2410fb_mach_info *npd;
- npd = kmemdup(pd, sizeof(*npd), GFP_KERNEL);
+ npd = s3c_set_platdata(pd, sizeof(*npd), &s3c_device_lcd);
if (npd) {
- s3c_device_lcd.dev.platform_data = npd;
npd->displays = kmemdup(pd->displays,
sizeof(struct s3c2410fb_display) * npd->num_displays,
GFP_KERNEL);
@@ -188,12 +187,10 @@ struct platform_device s3c_device_ts = {
};
EXPORT_SYMBOL(s3c_device_ts);
-static struct s3c2410_ts_mach_info s3c2410ts_info;
-
void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *hard_s3c2410ts_info)
{
- memcpy(&s3c2410ts_info, hard_s3c2410ts_info, sizeof(struct s3c2410_ts_mach_info));
- s3c_device_ts.dev.platform_data = &s3c2410ts_info;
+ s3c_set_platdata(hard_s3c2410ts_info,
+ sizeof(struct s3c2410_ts_mach_info), &s3c_device_ts);
}
/* USB Device (Gadget)*/
@@ -223,15 +220,7 @@ EXPORT_SYMBOL(s3c_device_usbgadget);
void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *pd)
{
- struct s3c2410_udc_mach_info *npd;
-
- npd = kmalloc(sizeof(*npd), GFP_KERNEL);
- if (npd) {
- memcpy(npd, pd, sizeof(*npd));
- s3c_device_usbgadget.dev.platform_data = npd;
- } else {
- printk(KERN_ERR "no memory for udc platform data\n");
- }
+ s3c_set_platdata(pd, sizeof(*pd), &s3c_device_usbgadget);
}
/* USB High Speed 2.0 Device (Gadget) */
@@ -263,15 +252,7 @@ struct platform_device s3c_device_usb_hsudc = {
void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd)
{
- struct s3c24xx_hsudc_platdata *npd;
-
- npd = kmalloc(sizeof(*npd), GFP_KERNEL);
- if (npd) {
- memcpy(npd, pd, sizeof(*npd));
- s3c_device_usb_hsudc.dev.platform_data = npd;
- } else {
- printk(KERN_ERR "no memory for udc platform data\n");
- }
+ s3c_set_platdata(pd, sizeof(*pd), &s3c_device_usb_hsudc);
}
/* IIS */
@@ -383,13 +364,8 @@ EXPORT_SYMBOL(s3c_device_sdi);
void __init s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata)
{
- struct s3c24xx_mci_pdata *npd;
-
- npd = kmemdup(pdata, sizeof(struct s3c24xx_mci_pdata), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory to copy pdata", __func__);
-
- s3c_device_sdi.dev.platform_data = npd;
+ s3c_set_platdata(pdata, sizeof(struct s3c24xx_mci_pdata),
+ &s3c_device_sdi);
}
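The repeated kmemdup()/kmalloc()-and-assign sequences collapse into the shared s3c_set_platdata() helper. For reference, a sketch of what that helper is assumed to do (mirroring the plat-samsung implementation, not shown in this patch):

void __init *s3c_set_platdata(void *pd, size_t pdsize,
                              struct platform_device *pdev)
{
        void *npd;

        if (!pd)
                return NULL;

        npd = kmemdup(pd, pdsize, GFP_KERNEL);
        if (!npd)
                printk(KERN_ERR "%s: no memory for platform data\n", pdev->name);
        else
                pdev->dev.platform_data = npd;

        return npd;
}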
diff --git a/arch/arm/plat-s3c24xx/include/mach/clkdev.h b/arch/arm/plat-s3c24xx/include/mach/clkdev.h
new file mode 100644
index 000000000000..7dffa83d23ff
--- /dev/null
+++ b/arch/arm/plat-s3c24xx/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __MACH_CLKDEV_H__
+#define __MACH_CLKDEV_H__
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do {} while (0)
+
+#endif
diff --git a/arch/arm/plat-s3c24xx/include/plat/regs-iis.h b/arch/arm/plat-s3c24xx/include/plat/regs-iis.h
index a6f1d5df13b4..cc44e0e931e9 100644
--- a/arch/arm/plat-s3c24xx/include/plat/regs-iis.h
+++ b/arch/arm/plat-s3c24xx/include/plat/regs-iis.h
@@ -64,14 +64,5 @@
#define S3C2410_IISFCON_RXMASK (0x3f)
#define S3C2410_IISFCON_RXSHIFT (0)
-#define S3C2400_IISFCON_TXDMA (1<<11)
-#define S3C2400_IISFCON_RXDMA (1<<10)
-#define S3C2400_IISFCON_TXENABLE (1<<9)
-#define S3C2400_IISFCON_RXENABLE (1<<8)
-#define S3C2400_IISFCON_TXMASK (0x07 << 4)
-#define S3C2400_IISFCON_TXSHIFT (4)
-#define S3C2400_IISFCON_RXMASK (0x07)
-#define S3C2400_IISFCON_RXSHIFT (0)
-
#define S3C2410_IISFIFO (0x10)
#endif /* __ASM_ARCH_REGS_IIS_H */
diff --git a/arch/arm/plat-s3c24xx/include/plat/regs-spi.h b/arch/arm/plat-s3c24xx/include/plat/regs-spi.h
index 2b35479ee35c..892e2f680fca 100644
--- a/arch/arm/plat-s3c24xx/include/plat/regs-spi.h
+++ b/arch/arm/plat-s3c24xx/include/plat/regs-spi.h
@@ -67,7 +67,6 @@
#define S3C2410_SPPIN_ENMUL (1<<2) /* Multi Master Error detect */
#define S3C2410_SPPIN_RESERVED (1<<1)
-#define S3C2400_SPPIN_nCS (1<<1) /* SPI Card Select */
#define S3C2410_SPPIN_KEEP (1<<0) /* Master Out keep */
#define S3C2410_SPPRE (0x0C)
diff --git a/arch/arm/plat-s3c24xx/include/plat/s3c2400.h b/arch/arm/plat-s3c24xx/include/plat/s3c2400.h
deleted file mode 100644
index b3feaea5c70b..000000000000
--- a/arch/arm/plat-s3c24xx/include/plat/s3c2400.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* linux/include/asm-arm/plat-s3c24xx/s3c2400.h
- *
- * Copyright (c) 2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Header file for S3C2400 cpu support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Modifications:
- * 09-Fev-2006 LCVR First version, based on s3c2410.h
-*/
-
-#ifdef CONFIG_CPU_S3C2400
-
-extern int s3c2400_init(void);
-
-extern void s3c2400_map_io(void);
-
-extern void s3c2400_init_uarts(struct s3c2410_uartcfg *cfg, int no);
-
-extern void s3c2400_init_clocks(int xtal);
-
-#else
-#define s3c2400_init_clocks NULL
-#define s3c2400_init_uarts NULL
-#define s3c2400_map_io NULL
-#define s3c2400_init NULL
-#endif
diff --git a/arch/arm/plat-s3c24xx/s3c2410-clock.c b/arch/arm/plat-s3c24xx/s3c2410-clock.c
index 9ecc5d913679..def76aa3825a 100644
--- a/arch/arm/plat-s3c24xx/s3c2410-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2410-clock.c
@@ -90,37 +90,31 @@ static int s3c2410_upll_enable(struct clk *clk, int enable)
static struct clk init_clocks_off[] = {
{
.name = "nand",
- .id = -1,
.parent = &clk_h,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_NAND,
}, {
.name = "sdi",
- .id = -1,
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_SDI,
}, {
.name = "adc",
- .id = -1,
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_ADC,
}, {
.name = "i2c",
- .id = -1,
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_IIC,
}, {
.name = "iis",
- .id = -1,
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_IIS,
}, {
.name = "spi",
- .id = -1,
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_SPI,
@@ -130,70 +124,61 @@ static struct clk init_clocks_off[] = {
static struct clk init_clocks[] = {
{
.name = "lcd",
- .id = -1,
.parent = &clk_h,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_LCDC,
}, {
.name = "gpio",
- .id = -1,
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_GPIO,
}, {
.name = "usb-host",
- .id = -1,
.parent = &clk_h,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_USBH,
}, {
.name = "usb-device",
- .id = -1,
.parent = &clk_h,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_USBD,
}, {
.name = "timers",
- .id = -1,
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_PWMT,
}, {
.name = "uart",
- .id = 0,
+ .devname = "s3c2410-uart.0",
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_UART0,
}, {
.name = "uart",
- .id = 1,
+ .devname = "s3c2410-uart.1",
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_UART1,
}, {
.name = "uart",
- .id = 2,
+ .devname = "s3c2410-uart.2",
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_UART2,
}, {
.name = "rtc",
- .id = -1,
.parent = &clk_p,
.enable = s3c2410_clkcon_enable,
.ctrlbit = S3C2410_CLKCON_RTC,
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_p,
.ctrlbit = 0,
}, {
.name = "usb-bus-host",
- .id = -1,
.parent = &clk_usb_bus,
}, {
.name = "usb-bus-gadget",
- .id = -1,
.parent = &clk_usb_bus,
},
};
diff --git a/arch/arm/plat-s3c24xx/s3c2443-clock.c b/arch/arm/plat-s3c24xx/s3c2443-clock.c
index 82f2d4a39291..59552c0ea5fb 100644
--- a/arch/arm/plat-s3c24xx/s3c2443-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c2443-clock.c
@@ -56,7 +56,6 @@ int s3c2443_clkcon_enable_s(struct clk *clk, int enable)
struct clk clk_mpllref = {
.name = "mpllref",
.parent = &clk_xtal,
- .id = -1,
};
static struct clk *clk_epllref_sources[] = {
@@ -69,7 +68,6 @@ static struct clk *clk_epllref_sources[] = {
struct clksrc_clk clk_epllref = {
.clk = {
.name = "epllref",
- .id = -1,
},
.sources = &(struct clksrc_sources) {
.sources = clk_epllref_sources,
@@ -92,7 +90,6 @@ struct clksrc_clk clk_esysclk = {
.clk = {
.name = "esysclk",
.parent = &clk_epll,
- .id = -1,
},
.sources = &(struct clksrc_sources) {
.sources = clk_sysclk_sources,
@@ -115,7 +112,6 @@ static unsigned long s3c2443_getrate_mdivclk(struct clk *clk)
static struct clk clk_mdivclk = {
.name = "mdivclk",
.parent = &clk_mpllref,
- .id = -1,
.ops = &(struct clk_ops) {
.get_rate = s3c2443_getrate_mdivclk,
},
@@ -132,7 +128,6 @@ struct clksrc_clk clk_msysclk = {
.clk = {
.name = "msysclk",
.parent = &clk_xtal,
- .id = -1,
},
.sources = &(struct clksrc_sources) {
.sources = clk_msysclk_sources,
@@ -159,7 +154,6 @@ static unsigned long s3c2443_prediv_getrate(struct clk *clk)
static struct clk clk_prediv = {
.name = "prediv",
- .id = -1,
.parent = &clk_msysclk.clk,
.ops = &(struct clk_ops) {
.get_rate = s3c2443_prediv_getrate,
@@ -174,7 +168,6 @@ static struct clk clk_prediv = {
static struct clksrc_clk clk_usb_bus_host = {
.clk = {
.name = "usb-bus-host-parent",
- .id = -1,
.parent = &clk_esysclk.clk,
.ctrlbit = S3C2443_SCLKCON_USBHOST,
.enable = s3c2443_clkcon_enable_s,
@@ -189,7 +182,6 @@ static struct clksrc_clk clksrc_clks[] = {
/* UART baud-rate clock sourced from esysclk via a divisor */
.clk = {
.name = "uartclk",
- .id = -1,
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 4, .shift = 8 },
@@ -197,7 +189,6 @@ static struct clksrc_clk clksrc_clks[] = {
/* camera interface bus-clock, divided down from esysclk */
.clk = {
.name = "camif-upll", /* same as 2440 name */
- .id = -1,
.parent = &clk_esysclk.clk,
.ctrlbit = S3C2443_SCLKCON_CAMCLK,
.enable = s3c2443_clkcon_enable_s,
@@ -206,7 +197,6 @@ static struct clksrc_clk clksrc_clks[] = {
}, {
.clk = {
.name = "display-if",
- .id = -1,
.parent = &clk_esysclk.clk,
.ctrlbit = S3C2443_SCLKCON_DISPCLK,
.enable = s3c2443_clkcon_enable_s,
@@ -219,13 +209,11 @@ static struct clksrc_clk clksrc_clks[] = {
static struct clk init_clocks_off[] = {
{
.name = "adc",
- .id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_ADC,
}, {
.name = "i2c",
- .id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_IIC,
@@ -235,136 +223,117 @@ static struct clk init_clocks_off[] = {
static struct clk init_clocks[] = {
{
.name = "dma",
- .id = 0,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA0,
}, {
.name = "dma",
- .id = 1,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA1,
}, {
.name = "dma",
- .id = 2,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA2,
}, {
.name = "dma",
- .id = 3,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA3,
}, {
.name = "dma",
- .id = 4,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA4,
}, {
.name = "dma",
- .id = 5,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_DMA5,
}, {
.name = "hsmmc",
- .id = 1,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_HSMMC,
}, {
.name = "gpio",
- .id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_GPIO,
}, {
.name = "usb-host",
- .id = -1,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_USBH,
}, {
.name = "usb-device",
- .id = -1,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_USBD,
}, {
.name = "lcd",
- .id = -1,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_LCDC,
}, {
.name = "timers",
- .id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_PWMT,
}, {
.name = "cfc",
- .id = -1,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_CFC,
}, {
.name = "ssmc",
- .id = -1,
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2443_HCLKCON_SSMC,
}, {
.name = "uart",
- .id = 0,
+ .devname = "s3c2440-uart.0",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_UART0,
}, {
.name = "uart",
- .id = 1,
+ .devname = "s3c2440-uart.1",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_UART1,
}, {
.name = "uart",
- .id = 2,
+ .devname = "s3c2440-uart.2",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_UART2,
}, {
.name = "uart",
- .id = 3,
+ .devname = "s3c2440-uart.3",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_UART3,
}, {
.name = "rtc",
- .id = -1,
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_RTC,
}, {
.name = "watchdog",
- .id = -1,
.parent = &clk_p,
.ctrlbit = S3C2443_PCLKCON_WDT,
}, {
.name = "ac97",
- .id = -1,
.parent = &clk_p,
.ctrlbit = S3C2443_PCLKCON_AC97,
}, {
.name = "nand",
- .id = -1,
.parent = &clk_h,
}, {
.name = "usb-bus-host",
- .id = -1,
.parent = &clk_usb_bus_host.clk,
}
};
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S
index fd7032f84ae7..c56612569b40 100644
--- a/arch/arm/plat-s3c24xx/sleep.S
+++ b/arch/arm/plat-s3c24xx/sleep.S
@@ -41,31 +41,6 @@
.text
- /* s3c_cpu_save
- *
- * entry:
- * r1 = v:p offset
- */
-
-ENTRY(s3c_cpu_save)
- stmfd sp!, { r4 - r12, lr }
- ldr r3, =resume_with_mmu
- bl cpu_suspend
-
- @@ jump to final code to send system to sleep
- ldr r0, =pm_cpu_sleep
- @@ldr pc, [ r0 ]
- ldr r0, [ r0 ]
- mov pc, r0
-
- @@ return to the caller, after having the MMU
- @@ turned on, this restores the last bits from the
- @@ stack
-resume_with_mmu:
- ldmfd sp!, { r4 - r12, pc }
-
- .ltorg
-
/* sleep magic, to allow the bootloader to check for a valid
* image to resume to. Must be the first word before the
* s3c_cpu_resume entry.
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index 8d081d968c58..02af235298e2 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -168,6 +168,41 @@ unsigned long s5p_epll_get_rate(struct clk *clk)
return clk->rate;
}
+int s5p_spdif_set_rate(struct clk *clk, unsigned long rate)
+{
+ struct clk *pclk;
+ int ret;
+
+ pclk = clk_get_parent(clk);
+ if (IS_ERR(pclk))
+ return -EINVAL;
+
+ ret = pclk->ops->set_rate(pclk, rate);
+ clk_put(pclk);
+
+ return ret;
+}
+
+unsigned long s5p_spdif_get_rate(struct clk *clk)
+{
+ struct clk *pclk;
+ int rate;
+
+ pclk = clk_get_parent(clk);
+ if (IS_ERR(pclk))
+ return -EINVAL;
+
+ rate = pclk->ops->get_rate(clk);
+ clk_put(pclk);
+
+ return rate;
+}
+
+struct clk_ops s5p_sclk_spdif_ops = {
+ .set_rate = s5p_spdif_set_rate,
+ .get_rate = s5p_spdif_get_rate,
+};
+
static struct clk *s5p_clks[] __initdata = {
&clk_ext_xtal_mux,
&clk_48m,
diff --git a/arch/arm/plat-s5p/include/plat/s5p-clock.h b/arch/arm/plat-s5p/include/plat/s5p-clock.h
index 2b6dcff8ab2b..769b5bdfb046 100644
--- a/arch/arm/plat-s5p/include/plat/s5p-clock.h
+++ b/arch/arm/plat-s5p/include/plat/s5p-clock.h
@@ -47,4 +47,9 @@ extern int s5p_gatectrl(void __iomem *reg, struct clk *clk, int enable);
extern int s5p_epll_enable(struct clk *clk, int enable);
extern unsigned long s5p_epll_get_rate(struct clk *clk);
+/* SPDIF clk operations common for S5PC100/V210/C110 and Exynos4 */
+extern int s5p_spdif_set_rate(struct clk *clk, unsigned long rate);
+extern unsigned long s5p_spdif_get_rate(struct clk *clk);
+
+extern struct clk_ops s5p_sclk_spdif_ops;
#endif /* __ASM_PLAT_S5P_CLOCK_H */
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c
index 612934c48b0d..c833e7b57599 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-s5p/s5p-time.c
@@ -314,13 +314,6 @@ static void __iomem *s5p_timer_reg(void)
return S3C_TIMERREG(offset);
}
-static cycle_t s5p_timer_read(struct clocksource *cs)
-{
- void __iomem *reg = s5p_timer_reg();
-
- return (cycle_t) (reg ? ~__raw_readl(reg) : 0);
-}
-
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
@@ -350,14 +343,6 @@ static void notrace s5p_update_sched_clock(void)
update_sched_clock(&cd, ~__raw_readl(reg), (u32)~0);
}
-struct clocksource time_clocksource = {
- .name = "s5p_clocksource_timer",
- .rating = 250,
- .read = s5p_timer_read,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
static void __init s5p_clocksource_init(void)
{
unsigned long pclk;
@@ -375,8 +360,9 @@ static void __init s5p_clocksource_init(void)
init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
- if (clocksource_register_hz(&time_clocksource, clock_rate))
- panic("%s: can't register clocksource\n", time_clocksource.name);
+ if (clocksource_mmio_init(s5p_timer_reg(), "s5p_clocksource_timer",
+ clock_rate, 250, 32, clocksource_mmio_readl_down))
+ panic("s5p_clocksource_timer: can't register clocksource\n");
}
static void __init s5p_timer_resources(void)
@@ -384,6 +370,7 @@ static void __init s5p_timer_resources(void)
unsigned long event_id = timer_source.event_id;
unsigned long source_id = timer_source.source_id;
+ char devname[15];
timerclk = clk_get(NULL, "timers");
if (IS_ERR(timerclk))
@@ -391,6 +378,10 @@ static void __init s5p_timer_resources(void)
clk_enable(timerclk);
+ sprintf(devname, "s3c24xx-pwm.%lu", event_id);
+ s3c_device_timer[event_id].id = event_id;
+ s3c_device_timer[event_id].dev.init_name = devname;
+
tin_event = clk_get(&s3c_device_timer[event_id].dev, "pwm-tin");
if (IS_ERR(tin_event))
panic("failed to get pwm-tin clock for event timer");
@@ -401,6 +392,10 @@ static void __init s5p_timer_resources(void)
clk_enable(tin_event);
+ sprintf(devname, "s3c24xx-pwm.%lu", source_id);
+ s3c_device_timer[source_id].id = source_id;
+ s3c_device_timer[source_id].dev.init_name = devname;
+
tin_source = clk_get(&s3c_device_timer[source_id].dev, "pwm-tin");
if (IS_ERR(tin_source))
panic("failed to get pwm-tin clock for source timer");
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 4d79519d19a4..b3e10659e4b8 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -280,6 +280,12 @@ config SAMSUNG_DEV_PWM
help
Compile in platform device definition for PWM Timer
+config SAMSUNG_DEV_BACKLIGHT
+ bool
+ depends on SAMSUNG_DEV_PWM
+ help
+ Compile in platform device definition for LCD backlight with PWM Timer
+
config S3C24XX_PWM
bool "PWM device support"
select HAVE_PWM
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 53eb15b0a07d..853764ba8cc5 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_SAMSUNG_DEV_IDE) += dev-ide.o
obj-$(CONFIG_SAMSUNG_DEV_TS) += dev-ts.o
obj-$(CONFIG_SAMSUNG_DEV_KEYPAD) += dev-keypad.o
obj-$(CONFIG_SAMSUNG_DEV_PWM) += dev-pwm.o
+obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o
# DMA support
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 0c9f95d98561..302c42670bd1 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -71,74 +71,6 @@ static int clk_null_enable(struct clk *clk, int enable)
return 0;
}
-static int dev_is_s3c_uart(struct device *dev)
-{
- struct platform_device **pdev = s3c24xx_uart_devs;
- int i;
- for (i = 0; i < ARRAY_SIZE(s3c24xx_uart_devs); i++, pdev++)
- if (*pdev && dev == &(*pdev)->dev)
- return 1;
- return 0;
-}
-
-/*
- * Serial drivers call get_clock() very early, before platform bus
- * has been set up, this requires a special check to let them get
- * a proper clock
- */
-
-static int dev_is_platform_device(struct device *dev)
-{
- return dev->bus == &platform_bus_type ||
- (dev->bus == NULL && dev_is_s3c_uart(dev));
-}
-
-/* Clock API calls */
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- struct clk *p;
- struct clk *clk = ERR_PTR(-ENOENT);
- int idno;
-
- if (dev == NULL || !dev_is_platform_device(dev))
- idno = -1;
- else
- idno = to_platform_device(dev)->id;
-
- spin_lock(&clocks_lock);
-
- list_for_each_entry(p, &clocks, list) {
- if (p->id == idno &&
- strcmp(id, p->name) == 0 &&
- try_module_get(p->owner)) {
- clk = p;
- break;
- }
- }
-
- /* check for the case where a device was supplied, but the
- * clock that was being searched for is not device specific */
-
- if (IS_ERR(clk)) {
- list_for_each_entry(p, &clocks, list) {
- if (p->id == -1 && strcmp(id, p->name) == 0 &&
- try_module_get(p->owner)) {
- clk = p;
- break;
- }
- }
- }
-
- spin_unlock(&clocks_lock);
- return clk;
-}
-
-void clk_put(struct clk *clk)
-{
- module_put(clk->owner);
-}
-
int clk_enable(struct clk *clk)
{
if (IS_ERR(clk) || clk == NULL)
@@ -241,8 +173,6 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
return ret;
}
-EXPORT_SYMBOL(clk_get);
-EXPORT_SYMBOL(clk_put);
EXPORT_SYMBOL(clk_enable);
EXPORT_SYMBOL(clk_disable);
EXPORT_SYMBOL(clk_get_rate);
@@ -265,7 +195,6 @@ struct clk_ops clk_ops_def_setrate = {
struct clk clk_xtal = {
.name = "xtal",
- .id = -1,
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
@@ -273,30 +202,25 @@ struct clk clk_xtal = {
struct clk clk_ext = {
.name = "ext",
- .id = -1,
};
struct clk clk_epll = {
.name = "epll",
- .id = -1,
};
struct clk clk_mpll = {
.name = "mpll",
- .id = -1,
.ops = &clk_ops_def_setrate,
};
struct clk clk_upll = {
.name = "upll",
- .id = -1,
.parent = NULL,
.ctrlbit = 0,
};
struct clk clk_f = {
.name = "fclk",
- .id = -1,
.rate = 0,
.parent = &clk_mpll,
.ctrlbit = 0,
@@ -304,7 +228,6 @@ struct clk clk_f = {
struct clk clk_h = {
.name = "hclk",
- .id = -1,
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
@@ -313,7 +236,6 @@ struct clk clk_h = {
struct clk clk_p = {
.name = "pclk",
- .id = -1,
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
@@ -322,7 +244,6 @@ struct clk clk_p = {
struct clk clk_usb_bus = {
.name = "usb-bus",
- .id = -1,
.rate = 0,
.parent = &clk_upll,
};
@@ -330,7 +251,6 @@ struct clk clk_usb_bus = {
struct clk s3c24xx_uclk = {
.name = "uclk",
- .id = -1,
};
/* initialise the clock system */
@@ -346,14 +266,11 @@ int s3c24xx_register_clock(struct clk *clk)
if (clk->enable == NULL)
clk->enable = clk_null_enable;
- /* add to the list of available clocks */
-
- /* Quick check to see if this clock has already been registered. */
- BUG_ON(clk->list.prev != clk->list.next);
-
- spin_lock(&clocks_lock);
- list_add(&clk->list, &clocks);
- spin_unlock(&clocks_lock);
+ /* fill up the clk_lookup structure and register it */
+ clk->lookup.dev_id = clk->devname;
+ clk->lookup.con_id = clk->name;
+ clk->lookup.clk = clk;
+ clkdev_add(&clk->lookup);
return 0;
}
@@ -463,10 +380,7 @@ static int clk_debugfs_register_one(struct clk *c)
char s[255];
char *p = s;
- p += sprintf(p, "%s", c->name);
-
- if (c->id >= 0)
- sprintf(p, ":%d", c->id);
+ p += sprintf(p, "%s", c->devname);
d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
if (!d)
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
new file mode 100644
index 000000000000..3cedd4c407af
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-backlight.c
@@ -0,0 +1,149 @@
+/* linux/arch/arm/plat-samsung/dev-backlight.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Common infrastructure for PWM Backlight for Samsung boards
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/pwm_backlight.h>
+
+#include <plat/devs.h>
+#include <plat/gpio-cfg.h>
+#include <plat/backlight.h>
+
+static int samsung_bl_init(struct device *dev)
+{
+ int ret = 0;
+ struct platform_device *timer_dev =
+ container_of(dev->parent, struct platform_device, dev);
+ struct samsung_bl_gpio_info *bl_gpio_info =
+ timer_dev->dev.platform_data;
+
+ ret = gpio_request(bl_gpio_info->no, "Backlight");
+ if (ret) {
+ printk(KERN_ERR "failed to request GPIO for LCD Backlight\n");
+ return ret;
+ }
+
+ /* Configure GPIO pin with specific GPIO function for PWM timer */
+ s3c_gpio_cfgpin(bl_gpio_info->no, bl_gpio_info->func);
+
+ return 0;
+}
+
+static void samsung_bl_exit(struct device *dev)
+{
+ struct platform_device *timer_dev =
+ container_of(dev->parent, struct platform_device, dev);
+ struct samsung_bl_gpio_info *bl_gpio_info =
+ timer_dev->dev.platform_data;
+
+ s3c_gpio_cfgpin(bl_gpio_info->no, S3C_GPIO_OUTPUT);
+ gpio_free(bl_gpio_info->no);
+}
+
+/* Initialize a few important fields of platform_pwm_backlight_data
+ * structure with default values. These fields can be overridden by
+ * board-specific values sent from the machine file.
+ * For ease of operation, these fields are initialized with values
+ * used by most Samsung boards.
+ * Users have the option of sending info about other parameters
+ * for their specific boards.
+ */
+
+static struct platform_pwm_backlight_data samsung_dfl_bl_data __initdata = {
+ .max_brightness = 255,
+ .dft_brightness = 255,
+ .pwm_period_ns = 78770,
+ .init = samsung_bl_init,
+ .exit = samsung_bl_exit,
+};
+
+static struct platform_device samsung_dfl_bl_device __initdata = {
+ .name = "pwm-backlight",
+};
+
+/* samsung_bl_set - Set board specific data (if any) provided by user for
+ * PWM Backlight control and register specific PWM and backlight device.
+ * @gpio_info: structure containing GPIO info for PWM timer
+ * @bl_data: structure containing Backlight control data
+ */
+void samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
+ struct platform_pwm_backlight_data *bl_data)
+{
+ int ret = 0;
+ struct platform_device *samsung_bl_device;
+ struct platform_pwm_backlight_data *samsung_bl_data;
+
+ samsung_bl_device = kmemdup(&samsung_dfl_bl_device,
+ sizeof(struct platform_device), GFP_KERNEL);
+ if (!samsung_bl_device) {
+ printk(KERN_ERR "%s: no memory for platform dev\n", __func__);
+ return;
+ }
+
+ samsung_bl_data = s3c_set_platdata(&samsung_dfl_bl_data,
+ sizeof(struct platform_pwm_backlight_data), samsung_bl_device);
+ if (!samsung_bl_data) {
+ printk(KERN_ERR "%s: no memory for platform data\n", __func__);
+ goto err_data;
+ }
+
+ /* Copy board specific data provided by user */
+ samsung_bl_data->pwm_id = bl_data->pwm_id;
+ samsung_bl_device->dev.parent =
+ &s3c_device_timer[samsung_bl_data->pwm_id].dev;
+
+ if (bl_data->max_brightness)
+ samsung_bl_data->max_brightness = bl_data->max_brightness;
+ if (bl_data->dft_brightness)
+ samsung_bl_data->dft_brightness = bl_data->dft_brightness;
+ if (bl_data->lth_brightness)
+ samsung_bl_data->lth_brightness = bl_data->lth_brightness;
+ if (bl_data->pwm_period_ns)
+ samsung_bl_data->pwm_period_ns = bl_data->pwm_period_ns;
+ if (bl_data->init)
+ samsung_bl_data->init = bl_data->init;
+ if (bl_data->notify)
+ samsung_bl_data->notify = bl_data->notify;
+ if (bl_data->exit)
+ samsung_bl_data->exit = bl_data->exit;
+ if (bl_data->check_fb)
+ samsung_bl_data->check_fb = bl_data->check_fb;
+
+ /* Keep the GPIO info for future use */
+ s3c_device_timer[samsung_bl_data->pwm_id].dev.platform_data = gpio_info;
+
+ /* Register the specific PWM timer dev for Backlight control */
+ ret = platform_device_register(
+ &s3c_device_timer[samsung_bl_data->pwm_id]);
+ if (ret) {
+ printk(KERN_ERR "failed to register pwm timer for backlight: %d\n", ret);
+ goto err_plat_reg1;
+ }
+
+ /* Register the Backlight dev */
+ ret = platform_device_register(samsung_bl_device);
+ if (ret) {
+ printk(KERN_ERR "failed to register backlight device: %d\n", ret);
+ goto err_plat_reg2;
+ }
+
+ return;
+
+err_plat_reg2:
+ platform_device_unregister(&s3c_device_timer[samsung_bl_data->pwm_id]);
+err_plat_reg1:
+ kfree(samsung_bl_data);
+err_data:
+ kfree(samsung_bl_device);
+ return;
+}
diff --git a/arch/arm/plat-samsung/dev-fb.c b/arch/arm/plat-samsung/dev-fb.c
index bf60204c6297..49a1362fd25b 100644
--- a/arch/arm/plat-samsung/dev-fb.c
+++ b/arch/arm/plat-samsung/dev-fb.c
@@ -58,16 +58,6 @@ struct platform_device s3c_device_fb = {
void __init s3c_fb_set_platdata(struct s3c_fb_platdata *pd)
{
- struct s3c_fb_platdata *npd;
-
- if (!pd) {
- printk(KERN_ERR "%s: no platform data\n", __func__);
- return;
- }
-
- npd = kmemdup(pd, sizeof(struct s3c_fb_platdata), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
-
- s3c_device_fb.dev.platform_data = npd;
+ s3c_set_platdata(pd, sizeof(struct s3c_fb_platdata),
+ &s3c_device_fb);
}
diff --git a/arch/arm/plat-samsung/dev-hwmon.c b/arch/arm/plat-samsung/dev-hwmon.c
index b3ffb9587250..c91a79ce8f39 100644
--- a/arch/arm/plat-samsung/dev-hwmon.c
+++ b/arch/arm/plat-samsung/dev-hwmon.c
@@ -27,16 +27,6 @@ struct platform_device s3c_device_hwmon = {
void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd)
{
- struct s3c_hwmon_pdata *npd;
-
- if (!pd) {
- printk(KERN_ERR "%s: no platform data\n", __func__);
- return;
- }
-
- npd = kmemdup(pd, sizeof(struct s3c_hwmon_pdata), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
-
- s3c_device_hwmon.dev.platform_data = npd;
+ s3c_set_platdata(pd, sizeof(struct s3c_hwmon_pdata),
+ &s3c_device_hwmon);
}
diff --git a/arch/arm/plat-samsung/dev-i2c0.c b/arch/arm/plat-samsung/dev-i2c0.c
index 3a601c16f03c..f8251f5098bd 100644
--- a/arch/arm/plat-samsung/dev-i2c0.c
+++ b/arch/arm/plat-samsung/dev-i2c0.c
@@ -48,7 +48,7 @@ struct platform_device s3c_device_i2c0 = {
.resource = s3c_i2c_resource,
};
-static struct s3c2410_platform_i2c default_i2c_data0 __initdata = {
+struct s3c2410_platform_i2c default_i2c_data __initdata = {
.flags = 0,
.slave_addr = 0x10,
.frequency = 100*1000,
@@ -60,13 +60,11 @@ void __init s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *pd)
struct s3c2410_platform_i2c *npd;
if (!pd)
- pd = &default_i2c_data0;
+ pd = &default_i2c_data;
- npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- else if (!npd->cfg_gpio)
- npd->cfg_gpio = s3c_i2c0_cfg_gpio;
+ npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
+ &s3c_device_i2c0);
- s3c_device_i2c0.dev.platform_data = npd;
+ if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c0_cfg_gpio;
}
diff --git a/arch/arm/plat-samsung/dev-i2c1.c b/arch/arm/plat-samsung/dev-i2c1.c
index 858ee2a0414c..3b7c7bec1cf9 100644
--- a/arch/arm/plat-samsung/dev-i2c1.c
+++ b/arch/arm/plat-samsung/dev-i2c1.c
@@ -44,26 +44,18 @@ struct platform_device s3c_device_i2c1 = {
.resource = s3c_i2c_resource,
};
-static struct s3c2410_platform_i2c default_i2c_data1 __initdata = {
- .flags = 0,
- .bus_num = 1,
- .slave_addr = 0x10,
- .frequency = 100*1000,
- .sda_delay = 100,
-};
-
void __init s3c_i2c1_set_platdata(struct s3c2410_platform_i2c *pd)
{
struct s3c2410_platform_i2c *npd;
- if (!pd)
- pd = &default_i2c_data1;
+ if (!pd) {
+ pd = &default_i2c_data;
+ pd->bus_num = 1;
+ }
- npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- else if (!npd->cfg_gpio)
- npd->cfg_gpio = s3c_i2c1_cfg_gpio;
+ npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
+ &s3c_device_i2c1);
- s3c_device_i2c1.dev.platform_data = npd;
+ if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c1_cfg_gpio;
}
diff --git a/arch/arm/plat-samsung/dev-i2c2.c b/arch/arm/plat-samsung/dev-i2c2.c
index ff4ba69b6830..07e9fd0b1b8b 100644
--- a/arch/arm/plat-samsung/dev-i2c2.c
+++ b/arch/arm/plat-samsung/dev-i2c2.c
@@ -45,26 +45,18 @@ struct platform_device s3c_device_i2c2 = {
.resource = s3c_i2c_resource,
};
-static struct s3c2410_platform_i2c default_i2c_data2 __initdata = {
- .flags = 0,
- .bus_num = 2,
- .slave_addr = 0x10,
- .frequency = 100*1000,
- .sda_delay = 100,
-};
-
void __init s3c_i2c2_set_platdata(struct s3c2410_platform_i2c *pd)
{
struct s3c2410_platform_i2c *npd;
- if (!pd)
- pd = &default_i2c_data2;
+ if (!pd) {
+ pd = &default_i2c_data;
+ pd->bus_num = 2;
+ }
- npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- else if (!npd->cfg_gpio)
- npd->cfg_gpio = s3c_i2c2_cfg_gpio;
+ npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
+ &s3c_device_i2c2);
- s3c_device_i2c2.dev.platform_data = npd;
+ if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c2_cfg_gpio;
}
diff --git a/arch/arm/plat-samsung/dev-i2c3.c b/arch/arm/plat-samsung/dev-i2c3.c
index 8586a10014b7..d48efa93c6e7 100644
--- a/arch/arm/plat-samsung/dev-i2c3.c
+++ b/arch/arm/plat-samsung/dev-i2c3.c
@@ -43,26 +43,18 @@ struct platform_device s3c_device_i2c3 = {
.resource = s3c_i2c_resource,
};
-static struct s3c2410_platform_i2c default_i2c_data3 __initdata = {
- .flags = 0,
- .bus_num = 3,
- .slave_addr = 0x10,
- .frequency = 100*1000,
- .sda_delay = 100,
-};
-
void __init s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *pd)
{
struct s3c2410_platform_i2c *npd;
- if (!pd)
- pd = &default_i2c_data3;
+ if (!pd) {
+ pd = &default_i2c_data;
+ pd->bus_num = 3;
+ }
- npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- else if (!npd->cfg_gpio)
- npd->cfg_gpio = s3c_i2c3_cfg_gpio;
+ npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
+ &s3c_device_i2c3);
- s3c_device_i2c3.dev.platform_data = npd;
+ if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c3_cfg_gpio;
}
diff --git a/arch/arm/plat-samsung/dev-i2c4.c b/arch/arm/plat-samsung/dev-i2c4.c
index df2159e2daa6..07e26444efe6 100644
--- a/arch/arm/plat-samsung/dev-i2c4.c
+++ b/arch/arm/plat-samsung/dev-i2c4.c
@@ -43,26 +43,18 @@ struct platform_device s3c_device_i2c4 = {
.resource = s3c_i2c_resource,
};
-static struct s3c2410_platform_i2c default_i2c_data4 __initdata = {
- .flags = 0,
- .bus_num = 4,
- .slave_addr = 0x10,
- .frequency = 100*1000,
- .sda_delay = 100,
-};
-
void __init s3c_i2c4_set_platdata(struct s3c2410_platform_i2c *pd)
{
struct s3c2410_platform_i2c *npd;
- if (!pd)
- pd = &default_i2c_data4;
+ if (!pd) {
+ pd = &default_i2c_data;
+ pd->bus_num = 4;
+ }
- npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- else if (!npd->cfg_gpio)
- npd->cfg_gpio = s3c_i2c4_cfg_gpio;
+ npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
+ &s3c_device_i2c4);
- s3c_device_i2c4.dev.platform_data = npd;
+ if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c4_cfg_gpio;
}
diff --git a/arch/arm/plat-samsung/dev-i2c5.c b/arch/arm/plat-samsung/dev-i2c5.c
index 0499c2c3877b..f49655784563 100644
--- a/arch/arm/plat-samsung/dev-i2c5.c
+++ b/arch/arm/plat-samsung/dev-i2c5.c
@@ -43,26 +43,18 @@ struct platform_device s3c_device_i2c5 = {
.resource = s3c_i2c_resource,
};
-static struct s3c2410_platform_i2c default_i2c_data5 __initdata = {
- .flags = 0,
- .bus_num = 5,
- .slave_addr = 0x10,
- .frequency = 100*1000,
- .sda_delay = 100,
-};
-
void __init s3c_i2c5_set_platdata(struct s3c2410_platform_i2c *pd)
{
struct s3c2410_platform_i2c *npd;
- if (!pd)
- pd = &default_i2c_data5;
+ if (!pd) {
+ pd = &default_i2c_data;
+ pd->bus_num = 5;
+ }
- npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- else if (!npd->cfg_gpio)
- npd->cfg_gpio = s3c_i2c5_cfg_gpio;
+ npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
+ &s3c_device_i2c5);
- s3c_device_i2c5.dev.platform_data = npd;
+ if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c5_cfg_gpio;
}
diff --git a/arch/arm/plat-samsung/dev-i2c6.c b/arch/arm/plat-samsung/dev-i2c6.c
index 4083108908a8..141d799944e2 100644
--- a/arch/arm/plat-samsung/dev-i2c6.c
+++ b/arch/arm/plat-samsung/dev-i2c6.c
@@ -43,26 +43,18 @@ struct platform_device s3c_device_i2c6 = {
.resource = s3c_i2c_resource,
};
-static struct s3c2410_platform_i2c default_i2c_data6 __initdata = {
- .flags = 0,
- .bus_num = 6,
- .slave_addr = 0x10,
- .frequency = 100*1000,
- .sda_delay = 100,
-};
-
void __init s3c_i2c6_set_platdata(struct s3c2410_platform_i2c *pd)
{
struct s3c2410_platform_i2c *npd;
- if (!pd)
- pd = &default_i2c_data6;
+ if (!pd) {
+ pd = &default_i2c_data;
+ pd->bus_num = 6;
+ }
- npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- else if (!npd->cfg_gpio)
- npd->cfg_gpio = s3c_i2c6_cfg_gpio;
+ npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
+ &s3c_device_i2c6);
- s3c_device_i2c6.dev.platform_data = npd;
+ if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c6_cfg_gpio;
}
diff --git a/arch/arm/plat-samsung/dev-i2c7.c b/arch/arm/plat-samsung/dev-i2c7.c
index 1182451d7dce..9dddcd1665b5 100644
--- a/arch/arm/plat-samsung/dev-i2c7.c
+++ b/arch/arm/plat-samsung/dev-i2c7.c
@@ -43,26 +43,18 @@ struct platform_device s3c_device_i2c7 = {
.resource = s3c_i2c_resource,
};
-static struct s3c2410_platform_i2c default_i2c_data7 __initdata = {
- .flags = 0,
- .bus_num = 7,
- .slave_addr = 0x10,
- .frequency = 100*1000,
- .sda_delay = 100,
-};
-
void __init s3c_i2c7_set_platdata(struct s3c2410_platform_i2c *pd)
{
struct s3c2410_platform_i2c *npd;
- if (!pd)
- pd = &default_i2c_data7;
+ if (!pd) {
+ pd = &default_i2c_data;
+ pd->bus_num = 7;
+ }
- npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
- else if (!npd->cfg_gpio)
- npd->cfg_gpio = s3c_i2c7_cfg_gpio;
+ npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
+ &s3c_device_i2c7);
- s3c_device_i2c7.dev.platform_data = npd;
+ if (!npd->cfg_gpio)
+ npd->cfg_gpio = s3c_i2c7_cfg_gpio;
}
diff --git a/arch/arm/plat-samsung/dev-nand.c b/arch/arm/plat-samsung/dev-nand.c
index 6927ae8fd118..b8e30ec6ac26 100644
--- a/arch/arm/plat-samsung/dev-nand.c
+++ b/arch/arm/plat-samsung/dev-nand.c
@@ -91,11 +91,10 @@ void __init s3c_nand_set_platdata(struct s3c2410_platform_nand *nand)
* time then there is little chance the system is going to run.
*/
- npd = kmemdup(nand, sizeof(struct s3c2410_platform_nand), GFP_KERNEL);
- if (!npd) {
- printk(KERN_ERR "%s: failed copying platform data\n", __func__);
+ npd = s3c_set_platdata(nand, sizeof(struct s3c2410_platform_nand),
+ &s3c_device_nand);
+ if (!npd)
return;
- }
/* now see if we need to copy any of the nand set data */
@@ -123,6 +122,4 @@ void __init s3c_nand_set_platdata(struct s3c2410_platform_nand *nand)
to++;
}
}
-
- s3c_device_nand.dev.platform_data = npd;
}
diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c
index 3e4bd8147bf4..82543f0248ac 100644
--- a/arch/arm/plat-samsung/dev-ts.c
+++ b/arch/arm/plat-samsung/dev-ts.c
@@ -45,16 +45,6 @@ struct platform_device s3c_device_ts = {
void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
{
- struct s3c2410_ts_mach_info *npd;
-
- if (!pd) {
- printk(KERN_ERR "%s: no platform data\n", __func__);
- return;
- }
-
- npd = kmemdup(pd, sizeof(struct s3c2410_ts_mach_info), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
-
- s3c_device_ts.dev.platform_data = npd;
+ s3c_set_platdata(pd, sizeof(struct s3c2410_ts_mach_info),
+ &s3c_device_ts);
}
diff --git a/arch/arm/plat-samsung/dev-usb.c b/arch/arm/plat-samsung/dev-usb.c
index 0e0a3bf5c982..33fbaa967700 100644
--- a/arch/arm/plat-samsung/dev-usb.c
+++ b/arch/arm/plat-samsung/dev-usb.c
@@ -60,11 +60,6 @@ EXPORT_SYMBOL(s3c_device_ohci);
*/
void __init s3c_ohci_set_platdata(struct s3c2410_hcd_info *info)
{
- struct s3c2410_hcd_info *npd;
-
- npd = kmemdup(info, sizeof(struct s3c2410_hcd_info), GFP_KERNEL);
- if (!npd)
- printk(KERN_ERR "%s: no memory for platform data\n", __func__);
-
- s3c_device_ohci.dev.platform_data = npd;
+ s3c_set_platdata(info, sizeof(struct s3c2410_hcd_info),
+ &s3c_device_ohci);
}
diff --git a/arch/arm/plat-samsung/include/plat/backlight.h b/arch/arm/plat-samsung/include/plat/backlight.h
new file mode 100644
index 000000000000..51d8da846a62
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/backlight.h
@@ -0,0 +1,26 @@
+/* linux/arch/arm/plat-samsung/include/plat/backlight.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_PLAT_BACKLIGHT_H
+#define __ASM_PLAT_BACKLIGHT_H __FILE__
+
+/* samsung_bl_gpio_info - GPIO info for PWM Backlight control
+ * @no: GPIO number for PWM timer out
+ * @func: Special function of GPIO line for PWM timer
+ */
+struct samsung_bl_gpio_info {
+ int no;
+ int func;
+};
+
+extern void samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
+ struct platform_pwm_backlight_data *bl_data);
+
+#endif /* __ASM_PLAT_BACKLIGHT_H */
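
Taken together, a board file only has to describe the PWM output pin and the timer channel; samsung_bl_set() fills in the remaining fields from the defaults in dev-backlight.c. A minimal sketch of the board side, with placeholder GPIO, pin-function and PWM values that are not taken from this patch:

	static struct samsung_bl_gpio_info board_bl_gpio_info = {
		.no	= S5PV210_GPD0(3),	/* placeholder PWM output pin */
		.func	= S3C_GPIO_SFN(2),	/* placeholder pin function */
	};

	static struct platform_pwm_backlight_data board_bl_data = {
		.pwm_id		= 3,		/* placeholder timer channel */
		.pwm_period_ns	= 1000,		/* placeholder period */
	};

	static void __init board_machine_init(void)
	{
		/* registers the PWM timer device and the pwm-backlight device */
		samsung_bl_set(&board_bl_gpio_info, &board_bl_data);
	}
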
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index 983c578b8276..87d5b38a86fb 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -10,6 +10,7 @@
*/
#include <linux/spinlock.h>
+#include <linux/clkdev.h>
struct clk;
@@ -40,6 +41,7 @@ struct clk {
struct module *owner;
struct clk *parent;
const char *name;
+ const char *devname;
int id;
int usage;
unsigned long rate;
@@ -47,6 +49,7 @@ struct clk {
struct clk_ops *ops;
int (*enable)(struct clk *, int enable);
+ struct clk_lookup lookup;
#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
struct dentry *dent; /* For visible tree hierarchy */
#endif
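
With the devname and clk_lookup fields in place, s3c24xx_register_clock() (see the plat-samsung/clock.c hunk above) registers every clock with clkdev, so per-device clocks are matched by device name rather than by the old numeric id. A minimal sketch of the consumer side, using the names this patch assigns to the PWM clocks:

	/* resolves the clock registered with lookup.dev_id = "s3c24xx-pwm.0"
	 * and lookup.con_id = "pwm-tin"; no platform-bus id matching involved */
	struct clk *tin = clk_get(&s3c_device_timer[0].dev, "pwm-tin");
	if (IS_ERR(tin))
		pr_err("no pwm-tin clock for timer 0\n");

This is also why the s5p-time.c and plat-samsung/time.c hunks above set dev.init_name on s3c_device_timer[] before calling clk_get().
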
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
index 3ad8386599c3..9a4e53d52967 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
@@ -140,7 +140,7 @@ extern unsigned s3c_gpio_getcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
/* Pull-{up,down} resistor controls.
*
- * S3C2410,S3C2440,S3C24A0 = Pull-UP,
+ * S3C2410,S3C2440 = Pull-UP,
* S3C2412,S3C2413 = Pull-Down
* S3C6400,S3C6410 = Pull-Both [None,Down,Up,Undef]
* S3C2443 = Pull-Both [not same as S3C6400]
diff --git a/arch/arm/plat-samsung/include/plat/iic.h b/arch/arm/plat-samsung/include/plat/iic.h
index 1543da8f85c1..56b0059439e1 100644
--- a/arch/arm/plat-samsung/include/plat/iic.h
+++ b/arch/arm/plat-samsung/include/plat/iic.h
@@ -71,4 +71,6 @@ extern void s3c_i2c5_cfg_gpio(struct platform_device *dev);
extern void s3c_i2c6_cfg_gpio(struct platform_device *dev);
extern void s3c_i2c7_cfg_gpio(struct platform_device *dev);
+extern struct s3c2410_platform_i2c default_i2c_data;
+
#endif /* __ASM_ARCH_IIC_H */
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 7fb6f6be8c81..f6749916d194 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow;
/* per-cpu sleep functions */
extern void (*pm_cpu_prep)(void);
-extern void (*pm_cpu_sleep)(void);
+extern int (*pm_cpu_sleep)(unsigned long);
/* Flags for PM Control */
@@ -52,10 +52,9 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */
/* from sleep.S */
-extern int s3c_cpu_save(unsigned long *saveblk, long);
extern void s3c_cpu_resume(void);
-extern void s3c2410_cpu_suspend(void);
+extern int s3c2410_cpu_suspend(unsigned long);
/* sleep save info */
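
Under the new prototypes, each SoC sleep hook becomes a finisher function suitable for passing to cpu_suspend(), which is exactly how the plat-samsung/pm.c hunk below uses pm_cpu_sleep. A minimal sketch of such a hook under the new signature; the body is a placeholder, not code from this patch:

	static int board_cpu_sleep(unsigned long arg)
	{
		/* the SoC-specific "enter sleep" sequence goes here; by the
		 * time this runs, cpu_suspend() has already saved CPU state */
		cpu_do_idle();

		return 1;	/* only reached if entering sleep was aborted */
	}

	static int __init board_pm_init(void)
	{
		pm_cpu_sleep = board_cpu_sleep;	/* picked up by s3c_pm_enter() */
		return 0;
	}
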
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h
index 116edfe120b9..bac36fa3becb 100644
--- a/arch/arm/plat-samsung/include/plat/regs-serial.h
+++ b/arch/arm/plat-samsung/include/plat/regs-serial.h
@@ -155,14 +155,6 @@
#define S3C2410_UFSTAT_RXMASK (15<<0)
#define S3C2410_UFSTAT_RXSHIFT (0)
-/* UFSTAT S3C24A0 */
-#define S3C24A0_UFSTAT_TXFULL (1 << 14)
-#define S3C24A0_UFSTAT_RXFULL (1 << 6)
-#define S3C24A0_UFSTAT_TXMASK (63 << 8)
-#define S3C24A0_UFSTAT_TXSHIFT (8)
-#define S3C24A0_UFSTAT_RXMASK (63)
-#define S3C24A0_UFSTAT_RXSHIFT (0)
-
/* UFSTAT S3C2443 same as S3C2440 */
#define S3C2440_UFSTAT_TXFULL (1<<14)
#define S3C2440_UFSTAT_RXFULL (1<<6)
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 5c0a440d6e16..5fa1742d019b 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
+#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/map.h>
@@ -231,7 +232,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start,
void (*pm_cpu_prep)(void);
-void (*pm_cpu_sleep)(void);
+int (*pm_cpu_sleep)(unsigned long);
#define any_allowed(mask, allow) (((mask) & (allow)) != (allow))
@@ -294,15 +295,11 @@ static int s3c_pm_enter(suspend_state_t state)
s3c_pm_arch_stop_clocks();
- /* s3c_cpu_save will also act as our return point from when
+ /* this will also act as our return point from when
* we resume as it saves its own register state and restores it
* during the resume. */
- s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET);
-
- /* restore the cpu state using the kernel's cpu init code. */
-
- cpu_init();
+ cpu_suspend(0, pm_cpu_sleep);
/* restore the system state */
diff --git a/arch/arm/plat-samsung/pwm-clock.c b/arch/arm/plat-samsung/pwm-clock.c
index 46c9381e083b..f1bba88ed2f5 100644
--- a/arch/arm/plat-samsung/pwm-clock.c
+++ b/arch/arm/plat-samsung/pwm-clock.c
@@ -268,6 +268,7 @@ static struct pwm_tdiv_clk clk_timer_tdiv[] = {
[0] = {
.clk = {
.name = "pwm-tdiv",
+ .devname = "s3c24xx-pwm.0",
.ops = &clk_tdiv_ops,
.parent = &clk_timer_scaler[0],
},
@@ -275,6 +276,7 @@ static struct pwm_tdiv_clk clk_timer_tdiv[] = {
[1] = {
.clk = {
.name = "pwm-tdiv",
+ .devname = "s3c24xx-pwm.1",
.ops = &clk_tdiv_ops,
.parent = &clk_timer_scaler[0],
}
@@ -282,6 +284,7 @@ static struct pwm_tdiv_clk clk_timer_tdiv[] = {
[2] = {
.clk = {
.name = "pwm-tdiv",
+ .devname = "s3c24xx-pwm.2",
.ops = &clk_tdiv_ops,
.parent = &clk_timer_scaler[1],
},
@@ -289,6 +292,7 @@ static struct pwm_tdiv_clk clk_timer_tdiv[] = {
[3] = {
.clk = {
.name = "pwm-tdiv",
+ .devname = "s3c24xx-pwm.3",
.ops = &clk_tdiv_ops,
.parent = &clk_timer_scaler[1],
},
@@ -296,6 +300,7 @@ static struct pwm_tdiv_clk clk_timer_tdiv[] = {
[4] = {
.clk = {
.name = "pwm-tdiv",
+ .devname = "s3c24xx-pwm.4",
.ops = &clk_tdiv_ops,
.parent = &clk_timer_scaler[1],
},
@@ -361,26 +366,31 @@ static struct clk_ops clk_tin_ops = {
static struct clk clk_tin[] = {
[0] = {
.name = "pwm-tin",
+ .devname = "s3c24xx-pwm.0",
.id = 0,
.ops = &clk_tin_ops,
},
[1] = {
.name = "pwm-tin",
+ .devname = "s3c24xx-pwm.1",
.id = 1,
.ops = &clk_tin_ops,
},
[2] = {
.name = "pwm-tin",
+ .devname = "s3c24xx-pwm.2",
.id = 2,
.ops = &clk_tin_ops,
},
[3] = {
.name = "pwm-tin",
+ .devname = "s3c24xx-pwm.3",
.id = 3,
.ops = &clk_tin_ops,
},
[4] = {
.name = "pwm-tin",
+ .devname = "s3c24xx-pwm.4",
.id = 4,
.ops = &clk_tin_ops,
},
diff --git a/arch/arm/plat-samsung/time.c b/arch/arm/plat-samsung/time.c
index 2231d80ad817..e3bb806bbafe 100644
--- a/arch/arm/plat-samsung/time.c
+++ b/arch/arm/plat-samsung/time.c
@@ -259,6 +259,8 @@ static void __init s3c2410_timer_resources(void)
clk_enable(timerclk);
if (!use_tclk1_12()) {
+ tmpdev.id = 4;
+ tmpdev.dev.init_name = "s3c24xx-pwm.4";
tin = clk_get(&tmpdev.dev, "pwm-tin");
if (IS_ERR(tin))
panic("failed to get pwm-tin clock for system timer");
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 9897dcfc16d6..2d30c7f6edd3 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -77,27 +77,27 @@ ENTRY(vfp_support_entry)
bne look_for_VFP_exceptions @ VFP is already enabled
DBGSTR1 "enable %x", r10
- ldr r3, last_VFP_context_address
+ ldr r3, vfp_current_hw_state_address
orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set
- ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer
+ ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer
bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled
- cmp r4, r10
- beq check_for_exception @ we are returning to the same
- @ process, so the registers are
- @ still there. In this case, we do
- @ not want to drop a pending exception.
+ cmp r4, r10 @ this thread owns the hw context?
+#ifndef CONFIG_SMP
+ @ For UP, checking that this thread owns the hw context is
+ @ sufficient to determine that the hardware state is valid.
+ beq vfp_hw_state_valid
+
+ @ On UP, we lazily save the VFP context. As a different
+ @ thread wants ownership of the VFP hardware, save the old
+ @ state if there was a previous (valid) owner.
VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
@ exceptions, so we can get at the
@ rest of it
-#ifndef CONFIG_SMP
- @ Save out the current registers to the old thread state
- @ No need for SMP since this is not done lazily
-
DBGSTR1 "save old state %p", r4
- cmp r4, #0
- beq no_old_VFP_process
+ cmp r4, #0 @ if the vfp_current_hw_state is NULL
+ beq vfp_reload_hw @ then the hw state needs reloading
VFPFSTMIA r4, r5 @ save the working registers
VFPFMRX r5, FPSCR @ current status
#ifndef CONFIG_CPU_FEROCEON
@@ -110,13 +110,35 @@ ENTRY(vfp_support_entry)
1:
#endif
stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2
- @ and point r4 at the word at the
- @ start of the register dump
+vfp_reload_hw:
+
+#else
+ @ For SMP, if this thread does not own the hw context, then we
+ @ need to reload it. No need to save the old state as on SMP,
+ @ we always save the state when we switch away from a thread.
+ bne vfp_reload_hw
+
+ @ This thread has ownership of the current hardware context.
+ @ However, it may have been migrated to another CPU, in which
+ @ case the saved state is newer than the hardware context.
+ @ Check this by looking at the CPU number which the state was
+ @ last loaded onto.
+ ldr ip, [r10, #VFP_CPU]
+ teq ip, r11
+ beq vfp_hw_state_valid
+
+vfp_reload_hw:
+ @ We're loading this thread's state into the VFP hardware. Update
+ @ the CPU number which contains the most up to date VFP context.
+ str r11, [r10, #VFP_CPU]
+
+ VFPFMXR FPEXC, r5 @ enable VFP, disable any pending
+ @ exceptions, so we can get at the
+ @ rest of it
#endif
-no_old_VFP_process:
DBGSTR1 "load state %p", r10
- str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer
+ str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer
@ Load the saved state back into the VFP
VFPFLDMIA r10, r5 @ reload the working registers while
@ FPEXC is in a safe state
@@ -132,7 +154,8 @@ no_old_VFP_process:
#endif
VFPFMXR FPSCR, r5 @ restore status
-check_for_exception:
+@ The context stored in the VFP hardware is up to date with this thread
+vfp_hw_state_valid:
tst r1, #FPEXC_EX
bne process_exception @ might as well handle the pending
@ exception before retrying branch
@@ -207,8 +230,8 @@ ENTRY(vfp_save_state)
ENDPROC(vfp_save_state)
.align
-last_VFP_context_address:
- .word last_VFP_context
+vfp_current_hw_state_address:
+ .word vfp_current_hw_state
.macro tbl_branch, base, tmp, shift
#ifdef CONFIG_THUMB2_KERNEL
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f25e7ec89416..79bcb4316930 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -33,7 +33,6 @@ void vfp_support_entry(void);
void vfp_null_entry(void);
void (*vfp_vector)(void) = vfp_null_entry;
-union vfp_state *last_VFP_context[NR_CPUS];
/*
* Dual-use variable.
@@ -43,6 +42,46 @@ union vfp_state *last_VFP_context[NR_CPUS];
unsigned int VFP_arch;
/*
+ * The pointer to the vfpstate structure of the thread which currently
+ * owns the context held in the VFP hardware, or NULL if the hardware
+ * context is invalid.
+ *
+ * For UP, this is sufficient to tell which thread owns the VFP context.
+ * However, for SMP, we also need to check the CPU number stored in the
+ * saved state too to catch migrations.
+ */
+union vfp_state *vfp_current_hw_state[NR_CPUS];
+
+/*
+ * Is the thread's most up to date state stored in this CPU's hardware?
+ * Must be called from non-preemptible context.
+ */
+static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
+{
+#ifdef CONFIG_SMP
+ if (thread->vfpstate.hard.cpu != cpu)
+ return false;
+#endif
+ return vfp_current_hw_state[cpu] == &thread->vfpstate;
+}
+
+/*
+ * Force a reload of the VFP context from the thread structure. We do
+ * this by ensuring that access to the VFP hardware is disabled, and
+ * clear last_VFP_context. Must be called from non-preemptible context.
+ */
+static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
+{
+ if (vfp_state_in_hw(cpu, thread)) {
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ vfp_current_hw_state[cpu] = NULL;
+ }
+#ifdef CONFIG_SMP
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
+}
+
+/*
* Per-thread VFP initialization.
*/
static void vfp_thread_flush(struct thread_info *thread)
@@ -50,21 +89,27 @@ static void vfp_thread_flush(struct thread_info *thread)
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu;
- memset(vfp, 0, sizeof(union vfp_state));
-
- vfp->hard.fpexc = FPEXC_EN;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
/*
* Disable VFP to ensure we initialize it first. We must ensure
- * that the modification of last_VFP_context[] and hardware disable
- * are done for the same CPU and without preemption.
+ * that the modification of vfp_current_hw_state[] and hardware
+ * disable are done for the same CPU and without preemption.
+ *
+ * Do this first to ensure that preemption won't overwrite our
+ * state saving should access to the VFP be enabled at this point.
*/
cpu = get_cpu();
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
+
+ memset(vfp, 0, sizeof(union vfp_state));
+
+ vfp->hard.fpexc = FPEXC_EN;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+#ifdef CONFIG_SMP
+ vfp->hard.cpu = NR_CPUS;
+#endif
}
static void vfp_thread_exit(struct thread_info *thread)
@@ -73,8 +118,8 @@ static void vfp_thread_exit(struct thread_info *thread)
union vfp_state *vfp = &thread->vfpstate;
unsigned int cpu = get_cpu();
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (vfp_current_hw_state[cpu] == vfp)
+ vfp_current_hw_state[cpu] = NULL;
put_cpu();
}
@@ -84,6 +129,9 @@ static void vfp_thread_copy(struct thread_info *thread)
vfp_sync_hwstate(parent);
thread->vfpstate = parent->vfpstate;
+#ifdef CONFIG_SMP
+ thread->vfpstate.hard.cpu = NR_CPUS;
+#endif
}
/*
@@ -129,17 +177,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
* case the thread migrates to a different CPU. The
* restoring is done lazily.
*/
- if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
- vfp_save_state(last_VFP_context[cpu], fpexc);
- last_VFP_context[cpu]->hard.cpu = cpu;
- }
- /*
- * Thread migration, just force the reloading of the
- * state on the new CPU in case the VFP registers
- * contain stale data.
- */
- if (thread->vfpstate.hard.cpu != cpu)
- last_VFP_context[cpu] = NULL;
+ if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
/*
@@ -415,7 +454,7 @@ static int vfp_pm_suspend(void)
}
/* clear any information we had about last context state */
- memset(last_VFP_context, 0, sizeof(last_VFP_context));
+ memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
return 0;
}
@@ -443,15 +482,15 @@ static void vfp_pm_init(void)
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
+/*
+ * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
+ * with the hardware state.
+ */
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
- /*
- * If the thread we're interested in is the current owner of the
- * hardware VFP state, then we need to save its state.
- */
- if (last_VFP_context[cpu] == &thread->vfpstate) {
+ if (vfp_state_in_hw(cpu, thread)) {
u32 fpexc = fmrx(FPEXC);
/*
@@ -465,36 +504,13 @@ void vfp_sync_hwstate(struct thread_info *thread)
put_cpu();
}
+/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
- /*
- * If the thread we're interested in is the current owner of the
- * hardware VFP state, then we need to save its state.
- */
- if (last_VFP_context[cpu] == &thread->vfpstate) {
- u32 fpexc = fmrx(FPEXC);
+ vfp_force_reload(cpu, thread);
- fmxr(FPEXC, fpexc & ~FPEXC_EN);
-
- /*
- * Set the context to NULL to force a reload the next time
- * the thread uses the VFP.
- */
- last_VFP_context[cpu] = NULL;
- }
-
-#ifdef CONFIG_SMP
- /*
- * For SMP we still have to take care of the case where the thread
- * migrates to another CPU and then back to the original CPU on which
- * the last VFP user is still the same thread. Mark the thread VFP
- * state as belonging to a non-existent CPU so that the saved one will
- * be reloaded in the above case.
- */
- thread->vfpstate.hard.cpu = NR_CPUS;
-#endif
put_cpu();
}
@@ -513,8 +529,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
void *hcpu)
{
if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
- unsigned int cpu = (long)hcpu;
- last_VFP_context[cpu] = NULL;
+ vfp_force_reload((long)hcpu, current_thread_info());
} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
vfp_enable(NULL);
return NOTIFY_OK;
@@ -582,7 +597,6 @@ static int __init vfp_init(void)
elf_hwcap |= HWCAP_VFPv3D16;
}
#endif
-#ifdef CONFIG_NEON
/*
* Check for the presence of the Advanced SIMD
* load/store instructions, integer and single
@@ -590,10 +604,13 @@ static int __init vfp_init(void)
* for NEON if the hardware has the MVFR registers.
*/
if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
+#ifdef CONFIG_NEON
if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
elf_hwcap |= HWCAP_NEON;
- }
#endif
+ if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+ elf_hwcap |= HWCAP_VFPv4;
+ }
}
return 0;
}
diff --git a/arch/avr32/include/asm/delay.h b/arch/avr32/include/asm/delay.h
index a0ed9a9839a5..9670e127b7b2 100644
--- a/arch/avr32/include/asm/delay.h
+++ b/arch/avr32/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef __ASM_AVR32_DELAY_H
-#define __ASM_AVR32_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/avr32/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
- __udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
-
-#endif /* __ASM_AVR32_DELAY_H */
+#include <asm-generic/delay.h>
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index a727f54d64d6..596f7305d93f 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -19,13 +19,6 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
void module_free(struct module *mod, void *module_region)
{
vfree(mod->arch.syminfo);
@@ -299,15 +292,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
return ret;
}
-int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relindex,
- struct module *module)
-{
- printk(KERN_ERR "module %s: REL relocations are not supported\n",
- module->name);
- return -ENOEXEC;
-}
-
int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *module)
{
@@ -316,7 +300,3 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
return 0;
}
-
-void module_arch_cleanup(struct module *module)
-{
-}
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index d619b17c4413..c7476295de80 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -953,6 +953,16 @@ config BFIN_GPTIMERS
To compile this driver as a module, choose M here: the module
will be called gptimers.
+config HAVE_PWM
+ tristate "Enable PWM API support"
+ depends on BFIN_GPTIMERS
+ help
+ Enable support for the Pulse Width Modulation framework (as
+ found in linux/pwm.h).
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm.
+
choice
prompt "Uncached DMA region"
default DMA_UNCACHED_1M
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 1c0a82a10591..d7ff2aee3fbc 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -58,13 +58,13 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=m
+CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=m
-CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_RAM=y
CONFIG_MTD_ROM=m
-CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 9e7c5379d3ff..7a075eaf6041 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -1,5 +1,48 @@
include include/asm-generic/Kbuild.asm
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bugs.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += futex.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local64.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += pgalloc.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += xor.h
+
header-y += bfin_sport.h
header-y += cachectl.h
header-y += fixed_code.h
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index e48508957160..4c707dbe1ff9 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__
@@ -76,11 +76,6 @@ static inline void atomic_set_mask(int mask, atomic_t *v)
__raw_atomic_set_asm(&v->counter, mask);
}
-static inline int atomic_test_mask(int mask, atomic_t *v)
-{
- return __raw_atomic_test_asm(&v->counter, mask);
-}
-
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
diff --git a/arch/blackfin/include/asm/auxvec.h b/arch/blackfin/include/asm/auxvec.h
deleted file mode 100644
index 41fa68b71287..000000000000
--- a/arch/blackfin/include/asm/auxvec.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/auxvec.h>
diff --git a/arch/blackfin/include/asm/bitsperlong.h b/arch/blackfin/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/blackfin/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index eb7c1441d8f9..0928700b6bc4 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -1,9 +1,9 @@
/*
* Common header file for Blackfin family of processors.
*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
*/
#ifndef _BLACKFIN_H_
diff --git a/arch/blackfin/include/asm/bugs.h b/arch/blackfin/include/asm/bugs.h
deleted file mode 100644
index 61791e1ad9f5..000000000000
--- a/arch/blackfin/include/asm/bugs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bugs.h>
diff --git a/arch/blackfin/include/asm/cputime.h b/arch/blackfin/include/asm/cputime.h
deleted file mode 100644
index 6d68ad7e0ea3..000000000000
--- a/arch/blackfin/include/asm/cputime.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/cputime.h>
diff --git a/arch/blackfin/include/asm/current.h b/arch/blackfin/include/asm/current.h
deleted file mode 100644
index 4c51401b5537..000000000000
--- a/arch/blackfin/include/asm/current.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
diff --git a/arch/blackfin/include/asm/device.h b/arch/blackfin/include/asm/device.h
deleted file mode 100644
index f0a4c256403b..000000000000
--- a/arch/blackfin/include/asm/device.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/device.h>
diff --git a/arch/blackfin/include/asm/div64.h b/arch/blackfin/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/blackfin/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index edf2a2ad5183..c4ec959dad78 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -117,7 +117,6 @@
#ifndef __ASSEMBLY__
void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
-void hibernate_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
void do_hibernate(int wakeup);
void set_dram_srfs(void);
@@ -134,32 +133,6 @@ struct bfin_dpmc_platform_data {
unsigned short vr_settling_time; /* in us */
};
-#else
-
-#define PM_PUSH(x) \
- R0 = [P0 + (x - SRAM_BASE_ADDRESS)];\
- [--SP] = R0;\
-
-#define PM_POP(x) \
- R0 = [SP++];\
- [P0 + (x - SRAM_BASE_ADDRESS)] = R0;\
-
-#define PM_SYS_PUSH(x) \
- R0 = [P0 + (x - PLL_CTL)];\
- [--SP] = R0;\
-
-#define PM_SYS_POP(x) \
- R0 = [SP++];\
- [P0 + (x - PLL_CTL)] = R0;\
-
-#define PM_SYS_PUSH16(x) \
- R0 = w[P0 + (x - PLL_CTL)];\
- [--SP] = R0;\
-
-#define PM_SYS_POP16(x) \
- R0 = [SP++];\
- w[P0 + (x - PLL_CTL)] = R0;\
-
#endif
#endif /*_BLACKFIN_DPMC_H_*/
diff --git a/arch/blackfin/include/asm/emergency-restart.h b/arch/blackfin/include/asm/emergency-restart.h
deleted file mode 100644
index 3711bd9d50bd..000000000000
--- a/arch/blackfin/include/asm/emergency-restart.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/emergency-restart.h>
diff --git a/arch/blackfin/include/asm/errno.h b/arch/blackfin/include/asm/errno.h
deleted file mode 100644
index 4c82b503d92f..000000000000
--- a/arch/blackfin/include/asm/errno.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/errno.h>
diff --git a/arch/blackfin/include/asm/fb.h b/arch/blackfin/include/asm/fb.h
deleted file mode 100644
index 3a4988e8df45..000000000000
--- a/arch/blackfin/include/asm/fb.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fb.h>
diff --git a/arch/blackfin/include/asm/futex.h b/arch/blackfin/include/asm/futex.h
deleted file mode 100644
index 0b745828f42b..000000000000
--- a/arch/blackfin/include/asm/futex.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/futex.h>
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 1ef8417f5d27..5a25856381ff 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -16,58 +16,13 @@
#include <mach/gpio.h>
-#define GPIO_0 0
-#define GPIO_1 1
-#define GPIO_2 2
-#define GPIO_3 3
-#define GPIO_4 4
-#define GPIO_5 5
-#define GPIO_6 6
-#define GPIO_7 7
-#define GPIO_8 8
-#define GPIO_9 9
-#define GPIO_10 10
-#define GPIO_11 11
-#define GPIO_12 12
-#define GPIO_13 13
-#define GPIO_14 14
-#define GPIO_15 15
-#define GPIO_16 16
-#define GPIO_17 17
-#define GPIO_18 18
-#define GPIO_19 19
-#define GPIO_20 20
-#define GPIO_21 21
-#define GPIO_22 22
-#define GPIO_23 23
-#define GPIO_24 24
-#define GPIO_25 25
-#define GPIO_26 26
-#define GPIO_27 27
-#define GPIO_28 28
-#define GPIO_29 29
-#define GPIO_30 30
-#define GPIO_31 31
-#define GPIO_32 32
-#define GPIO_33 33
-#define GPIO_34 34
-#define GPIO_35 35
-#define GPIO_36 36
-#define GPIO_37 37
-#define GPIO_38 38
-#define GPIO_39 39
-#define GPIO_40 40
-#define GPIO_41 41
-#define GPIO_42 42
-#define GPIO_43 43
-#define GPIO_44 44
-#define GPIO_45 45
-#define GPIO_46 46
-#define GPIO_47 47
-
#define PERIPHERAL_USAGE 1
#define GPIO_USAGE 0
+#ifndef BFIN_GPIO_PINT
+# define BFIN_GPIO_PINT 0
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
@@ -89,7 +44,7 @@
* MODIFICATION HISTORY :
**************************************************************/
-#ifndef CONFIG_BF54x
+#if !BFIN_GPIO_PINT
void set_gpio_dir(unsigned, unsigned short);
void set_gpio_inen(unsigned, unsigned short);
void set_gpio_polar(unsigned, unsigned short);
@@ -164,6 +119,10 @@ struct gpio_port_t {
#ifdef BFIN_SPECIAL_GPIO_BANKS
void bfin_special_gpio_free(unsigned gpio);
int bfin_special_gpio_request(unsigned gpio, const char *label);
+# ifdef CONFIG_PM
+void bfin_special_gpio_pm_hibernate_restore(void);
+void bfin_special_gpio_pm_hibernate_suspend(void);
+# endif
#endif
#ifdef CONFIG_PM
@@ -182,7 +141,7 @@ static inline void bfin_pm_standby_restore(void)
void bfin_gpio_pm_hibernate_restore(void);
void bfin_gpio_pm_hibernate_suspend(void);
-#ifndef CONFIG_BF54x
+# if !BFIN_GPIO_PINT
int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl);
struct gpio_port_s {
@@ -199,8 +158,9 @@ struct gpio_port_s {
unsigned short reserved;
unsigned short mux;
};
-#endif /*CONFIG_BF54x*/
+# endif
#endif /*CONFIG_PM*/
+
/***********************************************************
*
* FUNCTIONS: Blackfin GPIO Driver
diff --git a/arch/blackfin/include/asm/gptimers.h b/arch/blackfin/include/asm/gptimers.h
index 38657dac1235..38bddcb190c8 100644
--- a/arch/blackfin/include/asm/gptimers.h
+++ b/arch/blackfin/include/asm/gptimers.h
@@ -193,6 +193,16 @@ uint16_t get_enabled_gptimers(void);
uint32_t get_gptimer_status(unsigned int group);
void set_gptimer_status(unsigned int group, uint32_t value);
+static inline void enable_gptimer(unsigned int timer_id)
+{
+ enable_gptimers(1 << timer_id);
+}
+
+static inline void disable_gptimer(unsigned int timer_id)
+{
+ disable_gptimers(1 << timer_id);
+}
+
/*
* All Blackfin system MMRs are padded to 32bits even if the register
* itself is only 16bits. So use a helper macro to streamline this.
@@ -209,6 +219,15 @@ struct bfin_gptimer_regs {
u32 width;
};
+/*
+ * bfin group timer registers layout
+ */
+struct bfin_gptimer_group_regs {
+ __BFP(enable);
+ __BFP(disable);
+ u32 status;
+};
+
#undef __BFP
#endif
diff --git a/arch/blackfin/include/asm/hw_irq.h b/arch/blackfin/include/asm/hw_irq.h
deleted file mode 100644
index 1f5ef7da0045..000000000000
--- a/arch/blackfin/include/asm/hw_irq.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/hw_irq.h>
diff --git a/arch/blackfin/include/asm/ioctl.h b/arch/blackfin/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/blackfin/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/blackfin/include/asm/ipcbuf.h b/arch/blackfin/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/blackfin/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/blackfin/include/asm/irq_regs.h b/arch/blackfin/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/blackfin/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index b4bbb75a9e15..43eb4749de3d 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -18,12 +18,12 @@
extern unsigned long bfin_irq_flags;
#endif
-static inline void bfin_sti(unsigned long flags)
+static inline notrace void bfin_sti(unsigned long flags)
{
asm volatile("sti %0;" : : "d" (flags));
}
-static inline unsigned long bfin_cli(void)
+static inline notrace unsigned long bfin_cli(void)
{
unsigned long flags;
asm volatile("cli %0;" : "=d" (flags));
@@ -40,22 +40,22 @@ static inline unsigned long bfin_cli(void)
/*
* Hard, untraced CPU interrupt flag manipulation and access.
*/
-static inline void __hard_local_irq_disable(void)
+static inline notrace void __hard_local_irq_disable(void)
{
bfin_cli();
}
-static inline void __hard_local_irq_enable(void)
+static inline notrace void __hard_local_irq_enable(void)
{
bfin_sti(bfin_irq_flags);
}
-static inline unsigned long hard_local_save_flags(void)
+static inline notrace unsigned long hard_local_save_flags(void)
{
return bfin_read_IMASK();
}
-static inline unsigned long __hard_local_irq_save(void)
+static inline notrace unsigned long __hard_local_irq_save(void)
{
unsigned long flags;
flags = bfin_cli();
@@ -65,18 +65,18 @@ static inline unsigned long __hard_local_irq_save(void)
return flags;
}
-static inline int hard_irqs_disabled_flags(unsigned long flags)
+static inline notrace int hard_irqs_disabled_flags(unsigned long flags)
{
return (flags & ~0x3f) == 0;
}
-static inline int hard_irqs_disabled(void)
+static inline notrace int hard_irqs_disabled(void)
{
unsigned long flags = hard_local_save_flags();
return hard_irqs_disabled_flags(flags);
}
-static inline void __hard_local_irq_restore(unsigned long flags)
+static inline notrace void __hard_local_irq_restore(unsigned long flags)
{
if (!hard_irqs_disabled_flags(flags))
__hard_local_irq_enable();
@@ -113,31 +113,31 @@ void ipipe_check_context(struct ipipe_domain *ipd);
/*
* Interrupt pipe interface to linux/irqflags.h.
*/
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
{
__check_irqop_context();
__ipipe_stall_root();
barrier();
}
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
{
barrier();
__check_irqop_context();
__ipipe_unstall_root();
}
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
{
return __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags;
}
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
{
return flags == bfin_no_irqs;
}
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
@@ -148,13 +148,13 @@ static inline unsigned long arch_local_irq_save(void)
return flags;
}
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
{
__check_irqop_context();
__ipipe_restore_root(flags == bfin_no_irqs);
}
-static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
+static inline notrace unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
{
/*
* Merge virtual and real interrupt mask bits into a single
@@ -163,7 +163,7 @@ static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
return (real & ~(1 << 31)) | ((virt != 0) << 31);
}
-static inline int arch_demangle_irq_bits(unsigned long *x)
+static inline notrace int arch_demangle_irq_bits(unsigned long *x)
{
int virt = (*x & (1 << 31)) != 0;
*x &= ~(1L << 31);
@@ -174,7 +174,7 @@ static inline int arch_demangle_irq_bits(unsigned long *x)
* Interface to various arch routines that may be traced.
*/
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
-static inline void hard_local_irq_disable(void)
+static inline notrace void hard_local_irq_disable(void)
{
if (!hard_irqs_disabled()) {
__hard_local_irq_disable();
@@ -182,7 +182,7 @@ static inline void hard_local_irq_disable(void)
}
}
-static inline void hard_local_irq_enable(void)
+static inline notrace void hard_local_irq_enable(void)
{
if (hard_irqs_disabled()) {
ipipe_trace_end(0x80000000);
@@ -190,7 +190,7 @@ static inline void hard_local_irq_enable(void)
}
}
-static inline unsigned long hard_local_irq_save(void)
+static inline notrace unsigned long hard_local_irq_save(void)
{
unsigned long flags = hard_local_save_flags();
if (!hard_irqs_disabled_flags(flags)) {
@@ -200,7 +200,7 @@ static inline unsigned long hard_local_irq_save(void)
return flags;
}
-static inline void hard_local_irq_restore(unsigned long flags)
+static inline notrace void hard_local_irq_restore(unsigned long flags)
{
if (!hard_irqs_disabled_flags(flags)) {
ipipe_trace_end(0x80000001);
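These low-level IRQ helpers gain the notrace attribute so the function tracer (and the ipipe IRQs-off tracer they feed) can never instrument the very code that is toggling interrupt state and recurse into itself. For reference only, the definition below is assumed to match the stock one in include/linux/compiler.h; it is not part of this patch:

/* keeps a function out of -finstrument-functions / ftrace instrumentation */
#define notrace __attribute__((no_instrument_function))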
diff --git a/arch/blackfin/include/asm/kdebug.h b/arch/blackfin/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/blackfin/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/blackfin/include/asm/kmap_types.h b/arch/blackfin/include/asm/kmap_types.h
deleted file mode 100644
index 3575c64af42a..000000000000
--- a/arch/blackfin/include/asm/kmap_types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kmap_types.h>
diff --git a/arch/blackfin/include/asm/local.h b/arch/blackfin/include/asm/local.h
deleted file mode 100644
index c11c530f74d0..000000000000
--- a/arch/blackfin/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local.h>
diff --git a/arch/blackfin/include/asm/local64.h b/arch/blackfin/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/blackfin/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/blackfin/include/asm/mman.h b/arch/blackfin/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/blackfin/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/blackfin/include/asm/module.h b/arch/blackfin/include/asm/module.h
index 4282b169ead9..ed5689b82c9f 100644
--- a/arch/blackfin/include/asm/module.h
+++ b/arch/blackfin/include/asm/module.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _ASM_BFIN_MODULE_H
#define _ASM_BFIN_MODULE_H
diff --git a/arch/blackfin/include/asm/msgbuf.h b/arch/blackfin/include/asm/msgbuf.h
deleted file mode 100644
index 809134c644a6..000000000000
--- a/arch/blackfin/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/blackfin/include/asm/mutex.h b/arch/blackfin/include/asm/mutex.h
index f726e3a80ad0..ff6101aa2c71 100644
--- a/arch/blackfin/include/asm/mutex.h
+++ b/arch/blackfin/include/asm/mutex.h
@@ -1,76 +1 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- *
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-
-#ifndef CONFIG_SMP
-#include <asm-generic/mutex.h>
-#else
-
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- fail_fn(count);
- else
- smp_mb();
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
- else {
- smp_mb();
- return 0;
- }
-}
-
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- smp_mb();
- if (unlikely(atomic_inc_return(count) <= 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- /*
- * We have two variants here. The cmpxchg based one is the best one
- * because it never induce a false contention state. It is included
- * here because architectures using the inc/dec algorithms over the
- * xchg ones are much more likely to support cmpxchg natively.
- *
- * If not we fall back to the spinlock based variant - that is
- * just as efficient (and simpler) as a 'destructive' probing of
- * the mutex state would be.
- */
-#ifdef __HAVE_ARCH_CMPXCHG
- if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
- smp_mb();
- return 1;
- }
- return 0;
-#else
- return fail_fn(count);
-#endif
-}
-
-#endif
-
-#endif
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
index d0ce975bcd48..7202404966f6 100644
--- a/arch/blackfin/include/asm/page.h
+++ b/arch/blackfin/include/asm/page.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _BLACKFIN_PAGE_H
#define _BLACKFIN_PAGE_H
diff --git a/arch/blackfin/include/asm/param.h b/arch/blackfin/include/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/blackfin/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/blackfin/include/asm/pda.h b/arch/blackfin/include/asm/pda.h
index d49bb261d9b7..28c2498c9c98 100644
--- a/arch/blackfin/include/asm/pda.h
+++ b/arch/blackfin/include/asm/pda.h
@@ -54,6 +54,16 @@ struct blackfin_pda { /* Per-processor Data Area */
#endif
};
+struct blackfin_initial_pda {
+ void *retx;
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ void *dcplb_doublefault_addr;
+ void *icplb_doublefault_addr;
+ void *retx_doublefault;
+ unsigned seqstat_doublefault;
+#endif
+};
+
extern struct blackfin_pda cpu_pda[];
#endif /* __ASSEMBLY__ */
diff --git a/arch/blackfin/include/asm/percpu.h b/arch/blackfin/include/asm/percpu.h
deleted file mode 100644
index 06a959d67234..000000000000
--- a/arch/blackfin/include/asm/percpu.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/percpu.h>
diff --git a/arch/blackfin/include/asm/pgalloc.h b/arch/blackfin/include/asm/pgalloc.h
deleted file mode 100644
index f261cb7dda06..000000000000
--- a/arch/blackfin/include/asm/pgalloc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/pgalloc.h>
diff --git a/arch/blackfin/include/asm/resource.h b/arch/blackfin/include/asm/resource.h
deleted file mode 100644
index 04bc4db8921b..000000000000
--- a/arch/blackfin/include/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/blackfin/include/asm/scatterlist.h b/arch/blackfin/include/asm/scatterlist.h
deleted file mode 100644
index d177a1588958..000000000000
--- a/arch/blackfin/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _BLACKFIN_SCATTERLIST_H
-#define _BLACKFIN_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_BLACKFIN_SCATTERLIST_H) */
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 14a3e66d9167..fbd408475725 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _BLACKFIN_SECTIONS_H
#define _BLACKFIN_SECTIONS_H
diff --git a/arch/blackfin/include/asm/sembuf.h b/arch/blackfin/include/asm/sembuf.h
deleted file mode 100644
index 7673b83cfef7..000000000000
--- a/arch/blackfin/include/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/blackfin/include/asm/serial.h b/arch/blackfin/include/asm/serial.h
deleted file mode 100644
index a0cb0caff152..000000000000
--- a/arch/blackfin/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/blackfin/include/asm/setup.h b/arch/blackfin/include/asm/setup.h
deleted file mode 100644
index 552df83f1a49..000000000000
--- a/arch/blackfin/include/asm/setup.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/setup.h>
diff --git a/arch/blackfin/include/asm/shmbuf.h b/arch/blackfin/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de38..000000000000
--- a/arch/blackfin/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/blackfin/include/asm/shmparam.h b/arch/blackfin/include/asm/shmparam.h
deleted file mode 100644
index 93f30deb95d0..000000000000
--- a/arch/blackfin/include/asm/shmparam.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmparam.h>
diff --git a/arch/blackfin/include/asm/sigcontext.h b/arch/blackfin/include/asm/sigcontext.h
index ce4081a4d815..906bdc1f5fda 100644
--- a/arch/blackfin/include/asm/sigcontext.h
+++ b/arch/blackfin/include/asm/sigcontext.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef _ASM_BLACKFIN_SIGCONTEXT_H
#define _ASM_BLACKFIN_SIGCONTEXT_H
diff --git a/arch/blackfin/include/asm/socket.h b/arch/blackfin/include/asm/socket.h
deleted file mode 100644
index 6b71384b9d8b..000000000000
--- a/arch/blackfin/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/blackfin/include/asm/sockios.h b/arch/blackfin/include/asm/sockios.h
deleted file mode 100644
index def6d4746ee7..000000000000
--- a/arch/blackfin/include/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sockios.h>
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 1f286e71c21f..2336093fca23 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -1,8 +1,8 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H
diff --git a/arch/blackfin/include/asm/statfs.h b/arch/blackfin/include/asm/statfs.h
deleted file mode 100644
index 0b91fe198c20..000000000000
--- a/arch/blackfin/include/asm/statfs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/statfs.h>
diff --git a/arch/blackfin/include/asm/termbits.h b/arch/blackfin/include/asm/termbits.h
deleted file mode 100644
index 3935b106de79..000000000000
--- a/arch/blackfin/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/blackfin/include/asm/termios.h b/arch/blackfin/include/asm/termios.h
deleted file mode 100644
index 280d78a9d966..000000000000
--- a/arch/blackfin/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
diff --git a/arch/blackfin/include/asm/topology.h b/arch/blackfin/include/asm/topology.h
deleted file mode 100644
index 5428f333a02c..000000000000
--- a/arch/blackfin/include/asm/topology.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/topology.h>
diff --git a/arch/blackfin/include/asm/types.h b/arch/blackfin/include/asm/types.h
deleted file mode 100644
index b9e79bc580dd..000000000000
--- a/arch/blackfin/include/asm/types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/types.h>
diff --git a/arch/blackfin/include/asm/ucontext.h b/arch/blackfin/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9f30fb..000000000000
--- a/arch/blackfin/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/blackfin/include/asm/unaligned.h b/arch/blackfin/include/asm/unaligned.h
deleted file mode 100644
index 6cecbbb2111f..000000000000
--- a/arch/blackfin/include/asm/unaligned.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/unaligned.h>
diff --git a/arch/blackfin/include/asm/user.h b/arch/blackfin/include/asm/user.h
deleted file mode 100644
index 4792a60831e4..000000000000
--- a/arch/blackfin/include/asm/user.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/user.h>
diff --git a/arch/blackfin/include/asm/xor.h b/arch/blackfin/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/blackfin/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index d550b24d9e9b..b7bdc42fe1a3 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
CFLAGS_REMOVE_ftrace.o = -pg
+obj-$(CONFIG_HAVE_PWM) += pwm.o
obj-$(CONFIG_IPIPE) += ipipe.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index bd32c09b9349..17e35465a416 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -138,6 +138,16 @@ int main(void)
DEFINE(PDA_DF_SEQSTAT, offsetof(struct blackfin_pda, seqstat_doublefault));
DEFINE(PDA_DF_RETX, offsetof(struct blackfin_pda, retx_doublefault));
#endif
+
+ /* PDA initial management */
+ DEFINE(PDA_INIT_RETX, offsetof(struct blackfin_initial_pda, retx));
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+ DEFINE(PDA_INIT_DF_DCPLB, offsetof(struct blackfin_initial_pda, dcplb_doublefault_addr));
+ DEFINE(PDA_INIT_DF_ICPLB, offsetof(struct blackfin_initial_pda, icplb_doublefault_addr));
+ DEFINE(PDA_INIT_DF_SEQSTAT, offsetof(struct blackfin_initial_pda, seqstat_doublefault));
+ DEFINE(PDA_INIT_DF_RETX, offsetof(struct blackfin_initial_pda, retx_doublefault));
+#endif
+
#ifdef CONFIG_SMP
/* Inter-core lock (in L2 SRAM) */
DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
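The new PDA_INIT_* constants let early assembly record double-fault state into struct blackfin_initial_pda by name. For orientation, the DEFINE() used above is assumed to be the stock helper from include/linux/kbuild.h (reproduced here for context, not part of this patch); asm-offsets.c is compiled to assembly and the build scrapes these markers into the generated asm-offsets.h:

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
/* each DEFINE(PDA_INIT_RETX, offsetof(...)) therefore ends up as
 * "#define PDA_INIT_RETX <byte offset>" that .S files can use directly */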
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index bcf8cf6fe412..02796b88443d 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -118,6 +118,9 @@ static struct str_ident {
#if defined(CONFIG_PM)
static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
+# ifdef BF538_FAMILY
+static unsigned short port_fer_saved[3];
+# endif
#endif
static void gpio_error(unsigned gpio)
@@ -604,6 +607,11 @@ void bfin_gpio_pm_hibernate_suspend(void)
{
int i, bank;
+#ifdef BF538_FAMILY
+ for (i = 0; i < ARRAY_SIZE(port_fer_saved); ++i)
+ port_fer_saved[i] = *port_fer[i];
+#endif
+
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
bank = gpio_bank(i);
@@ -625,6 +633,10 @@ void bfin_gpio_pm_hibernate_suspend(void)
gpio_bank_saved[bank].maska = gpio_array[bank]->maska;
}
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+ bfin_special_gpio_pm_hibernate_suspend();
+#endif
+
AWA_DUMMY_READ(maska);
}
@@ -632,6 +644,11 @@ void bfin_gpio_pm_hibernate_restore(void)
{
int i, bank;
+#ifdef BF538_FAMILY
+ for (i = 0; i < ARRAY_SIZE(port_fer_saved); ++i)
+ *port_fer[i] = port_fer_saved[i];
+#endif
+
for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
bank = gpio_bank(i);
@@ -653,6 +670,11 @@ void bfin_gpio_pm_hibernate_restore(void)
gpio_array[bank]->both = gpio_bank_saved[bank].both;
gpio_array[bank]->maska = gpio_bank_saved[bank].maska;
}
+
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+ bfin_special_gpio_pm_hibernate_restore();
+#endif
+
AWA_DUMMY_READ(maska);
}
@@ -691,9 +713,9 @@ void bfin_gpio_pm_hibernate_restore(void)
gpio_array[bank]->port_mux = gpio_bank_saved[bank].mux;
gpio_array[bank]->port_fer = gpio_bank_saved[bank].fer;
gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
- gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
gpio_array[bank]->data_set = gpio_bank_saved[bank].data
- | gpio_bank_saved[bank].dir;
+ & gpio_bank_saved[bank].dir;
+ gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir;
}
}
#endif
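The reordered restore path is a glitch fix: the output data latch is programmed first (and only for pins that were outputs), and the pins are switched back to outputs afterwards, so a pin that was driven high cannot briefly drive low while being re-enabled. A condensed sketch of the intent, using the existing gpio_port_t/gpio_port_s structures; the helper name is hypothetical:

static void example_restore_bank(volatile struct gpio_port_t *port,
				 const struct gpio_port_s *saved)
{
	port->data_set = saved->data & saved->dir;	/* preload output pins only */
	port->dir_set  = saved->dir;			/* now turn them back into outputs */
}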
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
index fce4807ceef9..92f664826281 100644
--- a/arch/blackfin/kernel/debug-mmrs.c
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -27,7 +27,7 @@
#define PORT_MUX BFIN_PORT_MUX
#endif
-#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)addr)
+#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)(addr))
#define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR)
#define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR)
#define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR)
@@ -223,7 +223,8 @@ bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdm
__DMA(CURR_DESC_PTR, curr_desc_ptr);
__DMA(CURR_ADDR, curr_addr);
__DMA(IRQ_STATUS, irq_status);
- __DMA(PERIPHERAL_MAP, peripheral_map);
+ if (strcmp(pfx, "IMDMA") != 0)
+ __DMA(PERIPHERAL_MAP, peripheral_map);
__DMA(CURR_X_COUNT, curr_x_count);
__DMA(CURR_Y_COUNT, curr_y_count);
}
@@ -277,6 +278,32 @@ bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
}
#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
+#define GPTIMER_GROUP_OFF(mmr) REGS_OFF(gptimer_group, mmr)
+#define __GPTIMER_GROUP(uname, lname) __REGS(gptimer_group, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_gptimer_group(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf;
+
+ if (num == -1) {
+ _buf = buf + sprintf(buf, "TIMER_");
+ __GPTIMER_GROUP(ENABLE, enable);
+ __GPTIMER_GROUP(DISABLE, disable);
+ __GPTIMER_GROUP(STATUS, status);
+ } else {
+ /* These MMRs are a bit odd as the group # is a suffix */
+ _buf = buf + sprintf(buf, "TIMER_ENABLE%i", num);
+ d(buf, 16, base + GPTIMER_GROUP_OFF(enable));
+
+ _buf = buf + sprintf(buf, "TIMER_DISABLE%i", num);
+ d(buf, 16, base + GPTIMER_GROUP_OFF(disable));
+
+ _buf = buf + sprintf(buf, "TIMER_STATUS%i", num);
+ d(buf, 32, base + GPTIMER_GROUP_OFF(status));
+ }
+}
+#define GPTIMER_GROUP(mmr, num) bfin_debug_mmrs_gptimer_group(parent, mmr, num)
+
/*
* Handshake MDMA
*/
@@ -296,6 +323,29 @@ bfin_debug_mmrs_hmdma(struct dentry *parent, unsigned long base, int num)
#define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num)
/*
+ * Peripheral Interrupts (PINT/GPIO)
+ */
+#ifdef PINT0_MASK_SET
+#define __PINT(uname, lname) __REGS(pint, #uname, lname)
+static void __init __maybe_unused
+bfin_debug_mmrs_pint(struct dentry *parent, unsigned long base, int num)
+{
+ char buf[32], *_buf = REGS_STR_PFX(buf, PINT, num);
+ __PINT(MASK_SET, mask_set);
+ __PINT(MASK_CLEAR, mask_clear);
+ __PINT(REQUEST, request);
+ __PINT(ASSIGN, assign);
+ __PINT(EDGE_SET, edge_set);
+ __PINT(EDGE_CLEAR, edge_clear);
+ __PINT(INVERT_SET, invert_set);
+ __PINT(INVERT_CLEAR, invert_clear);
+ __PINT(PINSTATE, pinstate);
+ __PINT(LATCH, latch);
+}
+#define PINT(num) bfin_debug_mmrs_pint(parent, PINT##num##_MASK_SET, num)
+#endif
+
+/*
* Port/GPIO
*/
#define bfin_gpio_regs gpio_port_t
@@ -747,7 +797,7 @@ static int __init bfin_debug_mmrs_init(void)
#endif
parent = debugfs_create_dir("dmac", top);
-#ifdef DMA_TC_CNT
+#ifdef DMAC_TC_CNT
D16(DMAC_TC_CNT);
D16(DMAC_TC_PER);
#endif
@@ -1005,29 +1055,19 @@ static int __init bfin_debug_mmrs_init(void)
#endif
parent = debugfs_create_dir("gptimer", top);
-#ifdef TIMER_DISABLE
- D16(TIMER_DISABLE);
- D16(TIMER_ENABLE);
- D32(TIMER_STATUS);
+#ifdef TIMER_ENABLE
+ GPTIMER_GROUP(TIMER_ENABLE, -1);
#endif
-#ifdef TIMER_DISABLE0
- D16(TIMER_DISABLE0);
- D16(TIMER_ENABLE0);
- D32(TIMER_STATUS0);
+#ifdef TIMER_ENABLE0
+ GPTIMER_GROUP(TIMER_ENABLE0, 0);
#endif
-#ifdef TIMER_DISABLE1
- D16(TIMER_DISABLE1);
- D16(TIMER_ENABLE1);
- D32(TIMER_STATUS1);
+#ifdef TIMER_ENABLE1
+ GPTIMER_GROUP(TIMER_ENABLE1, 1);
#endif
/* XXX: Should convert BF561 MMR names */
#ifdef TMRS4_DISABLE
- D16(TMRS4_DISABLE);
- D16(TMRS4_ENABLE);
- D32(TMRS4_STATUS);
- D16(TMRS8_DISABLE);
- D16(TMRS8_ENABLE);
- D32(TMRS8_STATUS);
+ GPTIMER_GROUP(TMRS4_ENABLE, 0);
+ GPTIMER_GROUP(TMRS8_ENABLE, 1);
#endif
GPTIMER(0);
GPTIMER(1);
@@ -1253,6 +1293,14 @@ static int __init bfin_debug_mmrs_init(void)
D32(OTP_DATA3);
#endif
+#ifdef PINT0_MASK_SET
+ parent = debugfs_create_dir("pint", top);
+ PINT(0);
+ PINT(1);
+ PINT(2);
+ PINT(3);
+#endif
+
#ifdef PIXC_CTL
parent = debugfs_create_dir("pixc", top);
D16(PIXC_CTL);
@@ -1816,7 +1864,6 @@ static int __init bfin_debug_mmrs_init(void)
{
int num;
unsigned long base;
- char *_buf, buf[32];
base = PORTA_FER;
for (num = 0; num < 10; ++num) {
@@ -1824,24 +1871,6 @@ static int __init bfin_debug_mmrs_init(void)
base += sizeof(struct bfin_gpio_regs);
}
-#define __PINT(uname, lname) __REGS(pint, #uname, lname)
- parent = debugfs_create_dir("pint", top);
- base = PINT0_MASK_SET;
- for (num = 0; num < 4; ++num) {
- _buf = REGS_STR_PFX(buf, PINT, num);
- __PINT(MASK_SET, mask_set);
- __PINT(MASK_CLEAR, mask_clear);
- __PINT(IRQ, irq);
- __PINT(ASSIGN, assign);
- __PINT(EDGE_SET, edge_set);
- __PINT(EDGE_CLEAR, edge_clear);
- __PINT(INVERT_SET, invert_set);
- __PINT(INVERT_CLEAR, invert_clear);
- __PINT(PINSTATE, pinstate);
- __PINT(LATCH, latch);
- base += sizeof(struct bfin_pint_regs);
- }
-
}
#endif /* BF54x */
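The d()/D16()/D32() shorthands used throughout this file bottom out in the plain debugfs hex-entry helpers. A rough, assumed expansion of one generated entry; the register name and the literal address cast are illustrative only:

#include <linux/debugfs.h>

static void __init example_entry(struct dentry *parent)
{
	/* roughly what d(buf, 16, base + GPTIMER_GROUP_OFF(enable)) produces */
	debugfs_create_x16("TIMER_ENABLE0", S_IRUSR | S_IWUSR, parent,
			   (u16 *)TIMER_ENABLE0);
}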
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 8b81dc04488a..06459f4bf43a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -25,49 +25,33 @@
#define BFIN_TIMER_NUM_GROUP (BFIN_TIMER_OCTET(MAX_BLACKFIN_GPTIMERS - 1) + 1)
-typedef struct {
- uint16_t config;
- uint16_t __pad;
- uint32_t counter;
- uint32_t period;
- uint32_t width;
-} GPTIMER_timer_regs;
-
-typedef struct {
- uint16_t enable;
- uint16_t __pad0;
- uint16_t disable;
- uint16_t __pad1;
- uint32_t status;
-} GPTIMER_group_regs;
-
-static volatile GPTIMER_timer_regs *const timer_regs[MAX_BLACKFIN_GPTIMERS] =
+static struct bfin_gptimer_regs * const timer_regs[MAX_BLACKFIN_GPTIMERS] =
{
- (GPTIMER_timer_regs *)TIMER0_CONFIG,
- (GPTIMER_timer_regs *)TIMER1_CONFIG,
- (GPTIMER_timer_regs *)TIMER2_CONFIG,
+ (void *)TIMER0_CONFIG,
+ (void *)TIMER1_CONFIG,
+ (void *)TIMER2_CONFIG,
#if (MAX_BLACKFIN_GPTIMERS > 3)
- (GPTIMER_timer_regs *)TIMER3_CONFIG,
- (GPTIMER_timer_regs *)TIMER4_CONFIG,
- (GPTIMER_timer_regs *)TIMER5_CONFIG,
- (GPTIMER_timer_regs *)TIMER6_CONFIG,
- (GPTIMER_timer_regs *)TIMER7_CONFIG,
+ (void *)TIMER3_CONFIG,
+ (void *)TIMER4_CONFIG,
+ (void *)TIMER5_CONFIG,
+ (void *)TIMER6_CONFIG,
+ (void *)TIMER7_CONFIG,
# if (MAX_BLACKFIN_GPTIMERS > 8)
- (GPTIMER_timer_regs *)TIMER8_CONFIG,
- (GPTIMER_timer_regs *)TIMER9_CONFIG,
- (GPTIMER_timer_regs *)TIMER10_CONFIG,
+ (void *)TIMER8_CONFIG,
+ (void *)TIMER9_CONFIG,
+ (void *)TIMER10_CONFIG,
# if (MAX_BLACKFIN_GPTIMERS > 11)
- (GPTIMER_timer_regs *)TIMER11_CONFIG,
+ (void *)TIMER11_CONFIG,
# endif
# endif
#endif
};
-static volatile GPTIMER_group_regs *const group_regs[BFIN_TIMER_NUM_GROUP] =
+static struct bfin_gptimer_group_regs * const group_regs[BFIN_TIMER_NUM_GROUP] =
{
- (GPTIMER_group_regs *)TIMER0_GROUP_REG,
+ (void *)TIMER0_GROUP_REG,
#if (MAX_BLACKFIN_GPTIMERS > 8)
- (GPTIMER_group_regs *)TIMER8_GROUP_REG,
+ (void *)TIMER8_GROUP_REG,
#endif
};
@@ -140,7 +124,7 @@ static uint32_t const timil_mask[MAX_BLACKFIN_GPTIMERS] =
void set_gptimer_pwidth(unsigned int timer_id, uint32_t value)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->width = value;
+ bfin_write(&timer_regs[timer_id]->width, value);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_pwidth);
@@ -148,14 +132,14 @@ EXPORT_SYMBOL(set_gptimer_pwidth);
uint32_t get_gptimer_pwidth(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return timer_regs[timer_id]->width;
+ return bfin_read(&timer_regs[timer_id]->width);
}
EXPORT_SYMBOL(get_gptimer_pwidth);
void set_gptimer_period(unsigned int timer_id, uint32_t period)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->period = period;
+ bfin_write(&timer_regs[timer_id]->period, period);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_period);
@@ -163,71 +147,76 @@ EXPORT_SYMBOL(set_gptimer_period);
uint32_t get_gptimer_period(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return timer_regs[timer_id]->period;
+ return bfin_read(&timer_regs[timer_id]->period);
}
EXPORT_SYMBOL(get_gptimer_period);
uint32_t get_gptimer_count(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return timer_regs[timer_id]->counter;
+ return bfin_read(&timer_regs[timer_id]->counter);
}
EXPORT_SYMBOL(get_gptimer_count);
uint32_t get_gptimer_status(unsigned int group)
{
tassert(group < BFIN_TIMER_NUM_GROUP);
- return group_regs[group]->status;
+ return bfin_read(&group_regs[group]->status);
}
EXPORT_SYMBOL(get_gptimer_status);
void set_gptimer_status(unsigned int group, uint32_t value)
{
tassert(group < BFIN_TIMER_NUM_GROUP);
- group_regs[group]->status = value;
+ bfin_write(&group_regs[group]->status, value);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_status);
+static uint32_t read_gptimer_status(unsigned int timer_id)
+{
+ return bfin_read(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status);
+}
+
int get_gptimer_intr(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]);
+ return !!(read_gptimer_status(timer_id) & timil_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_intr);
void clear_gptimer_intr(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- group_regs[BFIN_TIMER_OCTET(timer_id)]->status = timil_mask[timer_id];
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status, timil_mask[timer_id]);
}
EXPORT_SYMBOL(clear_gptimer_intr);
int get_gptimer_over(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]);
+ return !!(read_gptimer_status(timer_id) & tovf_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_over);
void clear_gptimer_over(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- group_regs[BFIN_TIMER_OCTET(timer_id)]->status = tovf_mask[timer_id];
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(timer_id)]->status, tovf_mask[timer_id]);
}
EXPORT_SYMBOL(clear_gptimer_over);
int get_gptimer_run(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & trun_mask[timer_id]);
+ return !!(read_gptimer_status(timer_id) & trun_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_run);
void set_gptimer_config(unsigned int timer_id, uint16_t config)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->config = config;
+ bfin_write(&timer_regs[timer_id]->config, config);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_config);
@@ -235,7 +224,7 @@ EXPORT_SYMBOL(set_gptimer_config);
uint16_t get_gptimer_config(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- return timer_regs[timer_id]->config;
+ return bfin_read(&timer_regs[timer_id]->config);
}
EXPORT_SYMBOL(get_gptimer_config);
@@ -244,7 +233,7 @@ void enable_gptimers(uint16_t mask)
int i;
tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
- group_regs[i]->enable = mask & 0xFF;
+ bfin_write(&group_regs[i]->enable, mask & 0xFF);
mask >>= 8;
}
SSYNC();
@@ -257,7 +246,7 @@ static void _disable_gptimers(uint16_t mask)
uint16_t m = mask;
tassert((mask & ~BLACKFIN_GPTIMER_IDMASK) == 0);
for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i) {
- group_regs[i]->disable = m & 0xFF;
+ bfin_write(&group_regs[i]->disable, m & 0xFF);
m >>= 8;
}
}
@@ -268,7 +257,7 @@ void disable_gptimers(uint16_t mask)
_disable_gptimers(mask);
for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
if (mask & (1 << i))
- group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
+ bfin_write(&group_regs[BFIN_TIMER_OCTET(i)]->status, trun_mask[i]);
SSYNC();
}
EXPORT_SYMBOL(disable_gptimers);
@@ -283,7 +272,7 @@ EXPORT_SYMBOL(disable_gptimers_sync);
void set_gptimer_pulse_hi(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->config |= TIMER_PULSE_HI;
+ bfin_write_or(&timer_regs[timer_id]->config, TIMER_PULSE_HI);
SSYNC();
}
EXPORT_SYMBOL(set_gptimer_pulse_hi);
@@ -291,7 +280,7 @@ EXPORT_SYMBOL(set_gptimer_pulse_hi);
void clear_gptimer_pulse_hi(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
- timer_regs[timer_id]->config &= ~TIMER_PULSE_HI;
+ bfin_write_and(&timer_regs[timer_id]->config, ~TIMER_PULSE_HI);
SSYNC();
}
EXPORT_SYMBOL(clear_gptimer_pulse_hi);
@@ -301,7 +290,7 @@ uint16_t get_enabled_gptimers(void)
int i;
uint16_t result = 0;
for (i = 0; i < BFIN_TIMER_NUM_GROUP; ++i)
- result |= (group_regs[i]->enable << (i << 3));
+ result |= (bfin_read(&group_regs[i]->enable) << (i << 3));
return result;
}
EXPORT_SYMBOL(get_enabled_gptimers);
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index 35e350cad9d9..4489efc52883 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -16,19 +16,6 @@
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
/* Transfer the section to the L1 memory */
int
module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
@@ -150,14 +137,6 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
return 0;
}
-int
-apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec, struct module *mod)
-{
- pr_err(".rel unsupported\n");
- return -ENOEXEC;
-}
-
/*************************************************************************/
/* FUNCTION : apply_relocate_add */
/* ABSTRACT : Blackfin specific relocation handling for the loadable */
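module_alloc(), module_free() and the stub apply_relocate() can be deleted because generic kernel/module.c now supplies weak defaults with the same behaviour. The fallbacks are assumed to look roughly like the following; they are quoted from memory for context and are not part of this patch:

void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}

void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}

int __weak apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
			  unsigned int symindex, unsigned int relsec,
			  struct module *me)
{
	pr_err("module %s: REL relocation unsupported\n", me->name);
	return -ENOEXEC;
}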
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6a660fa921b5..6a80a9e9fc4a 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -140,7 +140,6 @@ EXPORT_SYMBOL(kernel_thread);
*/
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
- set_fs(USER_DS);
regs->pc = new_ip;
if (current->mm)
regs->p5 = current->mm->start_data;
diff --git a/arch/blackfin/kernel/pwm.c b/arch/blackfin/kernel/pwm.c
new file mode 100644
index 000000000000..33f5942733bd
--- /dev/null
+++ b/arch/blackfin/kernel/pwm.c
@@ -0,0 +1,100 @@
+/*
+ * Blackfin Pulse Width Modulation (PWM) core
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#include <asm/gptimers.h>
+#include <asm/portmux.h>
+
+struct pwm_device {
+ unsigned id;
+ unsigned short pin;
+};
+
+static const unsigned short pwm_to_gptimer_per[] = {
+ P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5,
+ P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11,
+};
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+ int ret;
+
+ /* XXX: pwm_id really should be unsigned */
+ if (pwm_id < 0)
+ return NULL;
+
+ pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
+ if (!pwm)
+ return pwm;
+
+ pwm->id = pwm_id;
+ if (pwm->id >= ARRAY_SIZE(pwm_to_gptimer_per))
+ goto err;
+
+ pwm->pin = pwm_to_gptimer_per[pwm->id];
+ ret = peripheral_request(pwm->pin, label);
+ if (ret)
+ goto err;
+
+ return pwm;
+ err:
+ kfree(pwm);
+ return NULL;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ peripheral_free(pwm->pin);
+ kfree(pwm);
+}
+EXPORT_SYMBOL(pwm_free);
+
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ unsigned long period, duty;
+ unsigned long long val;
+
+ if (duty_ns < 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ val = (unsigned long long)get_sclk() * period_ns;
+ do_div(val, NSEC_PER_SEC);
+ period = val;
+
+ val = (unsigned long long)period * duty_ns;
+ do_div(val, period_ns);
+ duty = period - val;
+
+ if (duty >= period)
+ duty = period - 1;
+
+ set_gptimer_config(pwm->id, TIMER_MODE_PWM | TIMER_PERIOD_CNT);
+ set_gptimer_pwidth(pwm->id, duty);
+ set_gptimer_period(pwm->id, period);
+
+ return 0;
+}
+EXPORT_SYMBOL(pwm_config);
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ enable_gptimer(pwm->id);
+ return 0;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ disable_gptimer(pwm->id);
+}
+EXPORT_SYMBOL(pwm_disable);
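A possible consumer of the new core, showing the intended pwm_request()/pwm_config()/pwm_enable() call sequence; the channel number, label and timings are made up and error handling is abbreviated:

#include <linux/module.h>
#include <linux/pwm.h>

static struct pwm_device *example_pwm;

static int __init example_pwm_init(void)
{
	example_pwm = pwm_request(4, "example-backlight");
	if (!example_pwm)
		return -ENODEV;

	/* 1 kHz period, 50% duty cycle, both given in nanoseconds */
	pwm_config(example_pwm, 500000, 1000000);
	return pwm_enable(example_pwm);
}
module_init(example_pwm_init);

static void __exit example_pwm_exit(void)
{
	pwm_disable(example_pwm);
	pwm_free(example_pwm);
}
module_exit(example_pwm_exit);

MODULE_LICENSE("GPL");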
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index 488bdc51aaa5..c4c0081b1996 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -54,7 +54,9 @@ static void bfin_reset(void)
/* The BF526 ROM will crash during reset */
#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
- bfin_read_SWRST();
+ /* Seems to be fixed with newer parts though ... */
+ if (__SILICON_REVISION__ < 1 && bfin_revid() < 1)
+ bfin_read_SWRST();
#endif
/* Wait for the SWRST write to complete. Cannot rely on SSYNC
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 536bd9d7e0cf..dfa2525a442d 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -54,8 +54,7 @@ EXPORT_SYMBOL(mtd_size);
#endif
char __initdata command_line[COMMAND_LINE_SIZE];
-void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
- *init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;
+struct blackfin_initial_pda __initdata initial_pda;
/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
@@ -957,13 +956,16 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* We assume the crashing kernel, and the current symbol table match */
- printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
- (int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
- printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
- printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
+ printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
+ initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
+ initial_pda.retx_doublefault);
+ printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
+ initial_pda.dcplb_doublefault_addr);
+ printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
+ initial_pda.icplb_doublefault_addr);
#endif
printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
- init_retx);
+ initial_pda.retx);
} else if (_bfin_swrst & RESET_WDOG)
printk(KERN_INFO "Recovering from Watchdog event\n");
else if (_bfin_swrst & RESET_SOFTWARE)
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 8d73724c0092..ceb2bf63dfe2 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -51,7 +51,7 @@ void __init setup_core_timer(void)
u32 tcount;
/* power up the timer, but don't enable it just yet */
- bfin_write_TCNTL(1);
+ bfin_write_TCNTL(TMPWR);
CSYNC();
/* the TSCALE prescaler counter */
@@ -64,7 +64,7 @@ void __init setup_core_timer(void)
/* now enable the timer */
CSYNC();
- bfin_write_TCNTL(7);
+ bfin_write_TCNTL(TAUTORLD | TMREN | TMPWR);
}
#endif
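The magic numbers 1 and 7 become the named core-timer TCNTL bits. The bit layout assumed below (quoted from memory, not from this patch) is what makes the two forms equivalent:

#define TMPWR    0x00000001	/* timer power / clock enable  -> the old "1" */
#define TMREN    0x00000002	/* timer enable, start counting */
#define TAUTORLD 0x00000004	/* reload TCOUNT from TPERIOD on expiry */
/* hence the old bfin_write_TCNTL(7) == TAUTORLD | TMREN | TMPWR */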
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 3ac5b66d14aa..ba35864b2b74 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -155,6 +155,7 @@ SECTIONS
SECURITY_INITCALL
INIT_RAM_FS
+ . = ALIGN(PAGE_SIZE);
___per_cpu_load = .;
PERCPU_INPUT(32)
diff --git a/arch/blackfin/mach-bf518/Kconfig b/arch/blackfin/mach-bf518/Kconfig
index 1d9f631a7f94..bde92a19970e 100644
--- a/arch/blackfin/mach-bf518/Kconfig
+++ b/arch/blackfin/mach-bf518/Kconfig
@@ -11,55 +11,75 @@ menu "BF518 Specific Configuration"
comment "Alternative Multiplexing Scheme"
choice
- prompt "SPORT0"
- default BF518_SPORT0_PORTG
+ prompt "PWM Channel Pins"
+ default BF518_PWM_ALL_PORTF
help
- Select PORT used for SPORT0. See Hardware Reference Manual
+ Select pins used for the PWM channels:
+ PWM_AH PWM_AL PWM_BH PWM_BL PWM_CH PWM_CL
-config BF518_SPORT0_PORTF
- bool "PORT F"
+ See the Hardware Reference Manual for more details.
+
+config BF518_PWM_ALL_PORTF
+ bool "PF1 - PF6"
help
- PORT F
+ PF{1,2,3,4,5,6} <-> PWM_{AH,AL,BH,BL,CH,CL}
-config BF518_SPORT0_PORTG
- bool "PORT G"
+config BF518_PWM_PORTF_PORTG
+ bool "PF11 - PF14 / PG1 - PG2"
help
- PORT G
+ PF{11,12,13,14} <-> PWM_{AH,AL,BH,BL}
+ PG{1,2} <-> PWM_{CH,CL}
+
endchoice
choice
- prompt "SPORT0 TSCLK Location"
- depends on BF518_SPORT0_PORTG
- default BF518_SPORT0_TSCLK_PG10
+ prompt "PWM Sync Pin"
+ default BF518_PWM_SYNC_PF7
help
- Select PIN used for SPORT0_TSCLK. See Hardware Reference Manual
+ Select the pin used for PWM_SYNC.
-config BF518_SPORT0_TSCLK_PG10
- bool "PORT PG10"
- help
- PORT PG10
+ See the Hardware Reference Manual for more details.
+
+config BF518_PWM_SYNC_PF7
+ bool "PF7"
+config BF518_PWM_SYNC_PF15
+ bool "PF15"
+endchoice
-config BF518_SPORT0_TSCLK_PG14
- bool "PORT PG14"
+choice
+ prompt "PWM Trip B Pin"
+ default BF518_PWM_TRIPB_PG10
help
- PORT PG14
+ Select the pin used for PWM_TRIPB.
+
+ See the Hardware Reference Manual for more details.
+
+config BF518_PWM_TRIPB_PG10
+ bool "PG10"
+config BF518_PWM_TRIPB_PG14
+ bool "PG14"
endchoice
choice
- prompt "UART1"
- default BF518_UART1_PORTF
+ prompt "PPI / Timer Pins"
+ default BF518_PPI_TMR_PG5
help
- Select PORT used for UART1. See Hardware Reference Manual
+ Select pins used for PPI/Timer:
+ PPICLK PPIFS1 PPIFS2
+ TMRCLK TMR0 TMR1
-config BF518_UART1_PORTF
- bool "PORT F"
+ See the Hardware Reference Manual for more details.
+
+config BF518_PPI_TMR_PG5
+ bool "PG5 - PG7"
help
- PORT F
+ PG{5,6,7} <-> {PPICLK/TMRCLK,TMR0/PPIFS1,TMR1/PPIFS2}
-config BF518_UART1_PORTG
- bool "PORT G"
+config BF518_PPI_TMR_PG12
+ bool "PG12 - PG14"
help
- PORT G
+ PG{12,13,14} <-> {PPICLK/TMRCLK,TMR0/PPIFS1,TMR1/PPIFS2}
+
endchoice
comment "Hysteresis/Schmitt Trigger Control"
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index c0ccadcfa44e..d78fc2cc7d16 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -187,43 +187,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
-#if defined(CONFIG_NET_DSA_KSZ8893M) \
- || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
-/* SPI SWITCH CHIP */
-static struct bfin5xx_spi_chip spi_switch_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-#endif
-
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -239,21 +212,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
- && defined(CONFIG_SND_SOC_WM8731_SPI)
-static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -269,18 +227,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#if defined(CONFIG_NET_DSA_KSZ8893M) \
|| defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
@@ -290,7 +236,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 1,
.platform_data = NULL,
- .controller_data = &spi_switch_info,
.mode = SPI_MODE_3,
},
#endif
@@ -314,7 +259,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -324,7 +268,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
@@ -334,7 +277,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -343,7 +285,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
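This and the following board files drop .bits_per_word (and with it most of the controller_data structs) for the same reason: per-device word size is now taken from the generic SPI core rather than from Blackfin-specific chip info. A hedged sketch of where the setting lives instead; the probe function is hypothetical:

#include <linux/spi/spi.h>

static int example_probe(struct spi_device *spi)
{
	/* protocol drivers such as ad7877 now request their own word size */
	spi->bits_per_word = 16;
	return spi_setup(spi);
}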
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index 50fc5c89e379..55c127908815 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -138,32 +138,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -179,21 +163,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
- && defined(CONFIG_SND_SOC_WM8731_SPI)
-static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -209,18 +178,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
@@ -239,7 +196,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -249,7 +205,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
@@ -259,7 +214,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -268,7 +222,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index d2f076fbbc9e..56383f7cbc07 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -11,10 +11,9 @@
*/
/* This file should be up to date with:
- * - Revision E, 01/26/2010; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
+ * - Revision F, 05/23/2011; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
*/
-/* We plan on not supporting 0.0 silicon, but 0.1 isn't out yet - sorry */
#if __SILICON_REVISION__ < 0
# error will not work on BF518 silicon version
#endif
@@ -77,19 +76,29 @@
/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
-#define ANOMALY_05000462 (1)
-/* PLL Latches Incorrect Settings During Reset */
-#define ANOMALY_05000469 (1)
+#define ANOMALY_05000462 (__SILICON_REVISION__ < 2)
/* Incorrect Default MSEL Value in PLL_CTL */
-#define ANOMALY_05000472 (1)
+#define ANOMALY_05000472 (__SILICON_REVISION__ < 2)
/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL Latches Incorrect Settings During Reset */
+#define ANOMALY_05000482 (__SILICON_REVISION__ < 2)
+/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
+#define ANOMALY_05000485 (__SILICON_REVISION__ < 2)
+/* SPI Master Boot Can Fail Under Certain Conditions */
+#define ANOMALY_05000490 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
+#define ANOMALY_05000498 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -157,6 +166,5 @@
#define ANOMALY_05000474 (0)
#define ANOMALY_05000475 (0)
#define ANOMALY_05000480 (0)
-#define ANOMALY_05000485 (0)
#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/portmux.h b/arch/blackfin/mach-bf518/include/mach/portmux.h
index cd84a569b04e..b3b806f468da 100644
--- a/arch/blackfin/mach-bf518/include/mach/portmux.h
+++ b/arch/blackfin/mach-bf518/include/mach/portmux.h
@@ -81,9 +81,15 @@
#define P_PPI0_D14 (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(1))
#define P_PPI0_D15 (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(1))
+#ifndef CONFIG_BF518_PPI_TMR_PG12
+#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(1))
+#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(1))
+#define P_PPI0_FS2 (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(1))
+#else
#define P_PPI0_CLK (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(1))
#define P_PPI0_FS1 (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(1))
#define P_PPI0_FS2 (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(1))
+#endif
#define P_PPI0_FS3 (P_DEFINED | P_IDENT(GPIO_PG15) | P_FUNCT(1))
/* SPI Port Mux */
@@ -139,9 +145,15 @@
#define P_UART1_RX (P_DEFINED | P_IDENT(GPIO_PH7) | P_FUNCT(1))
/* Timer */
+#ifndef CONFIG_BF518_PPI_TMR_PG12
#define P_TMRCLK (P_DEFINED | P_IDENT(GPIO_PG5) | P_FUNCT(2))
#define P_TMR0 (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(2))
#define P_TMR1 (P_DEFINED | P_IDENT(GPIO_PG7) | P_FUNCT(2))
+#else
+#define P_TMRCLK (P_DEFINED | P_IDENT(GPIO_PG12) | P_FUNCT(2))
+#define P_TMR0 (P_DEFINED | P_IDENT(GPIO_PG13) | P_FUNCT(2))
+#define P_TMR1 (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(2))
+#endif
#define P_TMR2 (P_DEFINED | P_IDENT(GPIO_PF9) | P_FUNCT(2))
#define P_TMR3 (P_DEFINED | P_IDENT(GPIO_PF10) | P_FUNCT(2))
#define P_TMR4 (P_DEFINED | P_IDENT(GPIO_PG9) | P_FUNCT(2))
@@ -158,23 +170,33 @@
#define P_TWI0_SDA (P_DONTCARE)
/* PWM */
-#define P_PWM0_AH (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(2))
-#define P_PWM0_AL (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(2))
-#define P_PWM0_BH (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(2))
-#define P_PWM0_BL (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(2))
-#define P_PWM0_CH (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(2))
-#define P_PWM0_CL (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(2))
-#define P_PWM0_SYNC (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(2))
-
-#define P_PWM1_AH (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(2))
-#define P_PWM1_AL (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(2))
-#define P_PWM1_BH (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(2))
-#define P_PWM1_BL (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(2))
-#define P_PWM1_CH (P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(2))
-#define P_PWM1_CL (P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(2))
-#define P_PWM1_SYNC (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(2))
-
+#ifndef CONFIG_BF518_PWM_PORTF_PORTG
+#define P_PWM_AH (P_DEFINED | P_IDENT(GPIO_PF1) | P_FUNCT(2))
+#define P_PWM_AL (P_DEFINED | P_IDENT(GPIO_PF2) | P_FUNCT(2))
+#define P_PWM_BH (P_DEFINED | P_IDENT(GPIO_PF3) | P_FUNCT(2))
+#define P_PWM_BL (P_DEFINED | P_IDENT(GPIO_PF4) | P_FUNCT(2))
+#define P_PWM_CH (P_DEFINED | P_IDENT(GPIO_PF5) | P_FUNCT(2))
+#define P_PWM_CL (P_DEFINED | P_IDENT(GPIO_PF6) | P_FUNCT(2))
+#else
+#define P_PWM_AH (P_DEFINED | P_IDENT(GPIO_PF11) | P_FUNCT(2))
+#define P_PWM_AL (P_DEFINED | P_IDENT(GPIO_PF12) | P_FUNCT(2))
+#define P_PWM_BH (P_DEFINED | P_IDENT(GPIO_PF13) | P_FUNCT(2))
+#define P_PWM_BL (P_DEFINED | P_IDENT(GPIO_PF14) | P_FUNCT(2))
+#define P_PWM_CH (P_DEFINED | P_IDENT(GPIO_PG1) | P_FUNCT(2))
+#define P_PWM_CL (P_DEFINED | P_IDENT(GPIO_PG2) | P_FUNCT(2))
+#endif
+
+#ifndef CONFIG_BF518_PWM_SYNC_PF15
+#define P_PWM_SYNC (P_DEFINED | P_IDENT(GPIO_PF7) | P_FUNCT(2))
+#else
+#define P_PWM_SYNC (P_DEFINED | P_IDENT(GPIO_PF15) | P_FUNCT(2))
+#endif
+
+#ifndef CONFIG_BF518_PWM_TRIPB_PG14
+#define P_PWM_TRIPB (P_DEFINED | P_IDENT(GPIO_PG10) | P_FUNCT(2))
+#else
#define P_PWM_TRIPB (P_DEFINED | P_IDENT(GPIO_PG14) | P_FUNCT(2))
+#endif
/* RSI */
#define P_RSI_DATA0 (P_DEFINED | P_IDENT(GPIO_PG3) | P_FUNCT(1))
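With the PWM pins collapsed from the old P_PWM0_*/P_PWM1_* pairs into one Kconfig-selected P_PWM_* set, a driver can claim the whole group in a single request. Illustrative only; the label and the choice to grab all six channels plus sync are assumptions:

#include <asm/portmux.h>

static const unsigned short example_pwm_pins[] = {
	P_PWM_AH, P_PWM_AL, P_PWM_BH, P_PWM_BL,
	P_PWM_CH, P_PWM_CL, P_PWM_SYNC,
	0,	/* the list is zero-terminated */
};

static int example_claim_pwm_pins(void)
{
	return peripheral_request_list(example_pwm_pins, "bf518-pwm");
}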
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index ccab4c689dc3..c04df43f6391 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -265,29 +265,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -328,7 +311,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -347,7 +329,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index c9d6dc88f0e6..6400341cc230 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -354,40 +354,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -403,21 +379,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
- && defined(CONFIG_SND_SOC_WM8731_SPI)
-static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -433,18 +394,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
@@ -452,7 +401,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -473,7 +421,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
@@ -483,7 +430,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
@@ -493,7 +439,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index b7101aa6e3aa..6dbb1b403763 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -253,32 +253,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (sst25wf040) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -311,35 +295,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
- && defined(CONFIG_SND_SOC_WM8731_SPI)
-static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -355,18 +310,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
@@ -385,7 +328,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -396,7 +338,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -407,7 +348,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
@@ -417,7 +357,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -426,7 +365,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index e67ac7720668..4e9dc9cf8241 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -409,6 +409,9 @@ static struct resource net2272_bfin_resources[] = {
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
+ .start = 1,
+ .flags = IORESOURCE_BUS,
+ }, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -448,40 +451,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -513,20 +492,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
@@ -574,9 +539,25 @@ static struct resource bfin_snd_resources[][4] = {
BFIN_SND_RES(0),
BFIN_SND_RES(1),
};
+#endif
-static struct platform_device bfin_pcm = {
- .name = "bfin-pcm-audio",
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+static struct platform_device bfin_i2s_pcm = {
+ .name = "bfin-i2s-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+static struct platform_device bfin_tdm_pcm = {
+ .name = "bfin-tdm-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+static struct platform_device bfin_ac97_pcm = {
+ .name = "bfin-ac97-pcm-audio",
.id = -1,
};
#endif
@@ -605,13 +586,6 @@ static struct platform_device bfin_tdm = {
};
#endif
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -627,18 +601,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
@@ -647,7 +609,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836",
- .controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -670,7 +631,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -681,7 +641,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 3,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -691,7 +650,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -700,7 +658,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 7,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -1276,9 +1233,16 @@ static struct platform_device *stamp_devices[] __initdata = {
&ezkit_flash_device,
#endif
-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
- defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
- &bfin_pcm,
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+ &bfin_i2s_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+ &bfin_tdm_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+ &bfin_ac97_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
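The stamp_devices change above splits the single "bfin-pcm-audio" device into one platform device per audio interface (I2S, TDM, AC97), so each PCM backend is registered only when its Kconfig option is enabled. Binding is by name match against the corresponding ASoC platform driver; a hedged sketch of the driver-side half (the probe function and init wrapper are hypothetical, only the device name is taken from this patch):

/* Illustrative only: the real drivers live under sound/soc/blackfin/.
 * A platform driver whose .driver.name equals the platform_device name
 * ("bfin-i2s-pcm-audio") is probed for the device registered above. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_i2s_pcm_probe(struct platform_device *pdev)
{
        /* register ASoC platform (PCM/DMA) ops for the I2S interface */
        return 0;
}

static struct platform_driver example_i2s_pcm_driver = {
        .driver = {
                .name = "bfin-i2s-pcm-audio",   /* must match device name */
        },
        .probe = example_i2s_pcm_probe,
};

static int __init example_i2s_pcm_init(void)
{
        return platform_driver_register(&example_i2s_pcm_driver);
}
module_init(example_i2s_pcm_init);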
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 18d303dd5627..ec4bc7429c9f 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -314,29 +314,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 0, /* use dma transfer with this chip*/
-/*
- * tll6527m V1.0 does not support native spi slave selects
- * hence DMA mode will not be useful since the ADC needs
- * CS to toggle for each sample and cs_change_per_word
- * seems to be removed from spi_bfin5xx.c
- */
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -359,21 +342,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) \
- || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
static struct platform_device bfin_i2s = {
.name = "bfin-i2s",
@@ -382,24 +350,7 @@ static struct platform_device bfin_i2s = {
};
#endif
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_GPIO_MCP23S08) || defined(CONFIG_GPIO_MCP23S08_MODULE)
-static struct bfin5xx_spi_chip spi_mcp23s08_sys_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-
-static struct bfin5xx_spi_chip spi_mcp23s08_usr_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-
#include <linux/spi/mcp23s08.h>
static const struct mcp23s08_platform_data bfin_mcp23s08_sys_gpio_info = {
.chip[0].is_present = true,
@@ -429,22 +380,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC)
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc",
- /* Name of spi_driver for this device */
- .max_speed_hz = 10000000,
- /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = EXP_GPIO_SPISEL_BASE + 0x04 + MAX_CTRL_CS,
- /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- .mode = SPI_MODE_0,
- },
-#endif
-
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
@@ -470,7 +405,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
/* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x07 + MAX_CTRL_CS,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -482,7 +416,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x03 + MAX_CTRL_CS,
.mode = SPI_CPHA | SPI_CPOL,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -491,7 +424,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000,
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x06 + MAX_CTRL_CS,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -502,7 +434,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x01 + MAX_CTRL_CS,
- .controller_data = &spi_mcp23s08_sys_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
{
@@ -511,7 +442,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = EXP_GPIO_SPISEL_BASE + 0x02 + MAX_CTRL_CS,
- .controller_data = &spi_mcp23s08_usr_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index e66a7e89cd3c..688470611e15 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -11,8 +11,8 @@
*/
/* This file should be up to date with:
- * - Revision E, 03/15/2010; ADSP-BF526 Blackfin Processor Anomaly List
- * - Revision H, 04/29/2010; ADSP-BF527 Blackfin Processor Anomaly List
+ * - Revision F, 05/23/2011; ADSP-BF526 Blackfin Processor Anomaly List
+ * - Revision I, 05/23/2011; ADSP-BF527 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -57,7 +57,7 @@
/* Incorrect Access of OTP_STATUS During otp_write() Function */
#define ANOMALY_05000328 (_ANOMALY_BF527(< 2))
/* Host DMA Boot Modes Are Not Functional */
-#define ANOMALY_05000330 (__SILICON_REVISION__ < 2)
+#define ANOMALY_05000330 (_ANOMALY_BF527(< 2))
/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
#define ANOMALY_05000337 (_ANOMALY_BF527(< 2))
/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */
@@ -135,7 +135,7 @@
/* Incorrect Default Internal Voltage Regulator Setting */
#define ANOMALY_05000410 (_ANOMALY_BF527(< 2))
/* bfrom_SysControl() Firmware Function Cannot be Used to Enter Power Saving Modes */
-#define ANOMALY_05000411 (_ANOMALY_BF526_BF527(< 1, < 2))
+#define ANOMALY_05000411 (_ANOMALY_BF526(< 1))
/* OTP_CHECK_FOR_PREV_WRITE Bit is Not Functional in bfrom_OtpWrite() API */
#define ANOMALY_05000414 (_ANOMALY_BF526_BF527(< 1, < 2))
/* DEB2_URGENT Bit Not Functional */
@@ -181,11 +181,11 @@
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
/* The WURESET Bit in the SYSCR Register is not Functional */
-#define ANOMALY_05000445 (1)
-/* USB DMA Mode 1 Short Packet Data Corruption */
+#define ANOMALY_05000445 (_ANOMALY_BF527(>= 0))
+/* USB DMA Short Packet Data Corruption */
#define ANOMALY_05000450 (1)
/* BCODE_QUICKBOOT, BCODE_ALLBOOT, and BCODE_FULLBOOT Settings in SYSCR Register Not Functional */
-#define ANOMALY_05000451 (1)
+#define ANOMALY_05000451 (_ANOMALY_BF527(>= 0))
/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
#define ANOMALY_05000452 (_ANOMALY_BF526_BF527(< 1, >= 0))
/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
@@ -198,19 +198,19 @@
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
#define ANOMALY_05000462 (1)
-/* USB Rx DMA hang */
+/* USB Rx DMA Hang */
#define ANOMALY_05000465 (1)
/* TxPktRdy Bit Not Set for Transmit Endpoint When Core and DMA Access USB Endpoint FIFOs Simultaneously */
#define ANOMALY_05000466 (1)
-/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
+/* Possible USB RX Data Corruption When Control & Data EP FIFOs are Accessed via the Core */
#define ANOMALY_05000467 (1)
/* PLL Latches Incorrect Settings During Reset */
#define ANOMALY_05000469 (1)
/* Incorrect Default MSEL Value in PLL_CTL */
#define ANOMALY_05000472 (_ANOMALY_BF526(>= 0))
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
@@ -219,11 +219,19 @@
/* Possible USB Data Corruption When Multiple Endpoints Are Accessed by the Core */
#define ANOMALY_05000483 (1)
/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
-#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, < 3))
+#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, >= 0))
/* The CODEC Zero-Cross Detect Feature is not Functional */
#define ANOMALY_05000487 (1)
-/* IFLUSH sucks at life */
+/* SPI Master Boot Can Fail Under Certain Conditions */
+#define ANOMALY_05000490 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
+#define ANOMALY_05000498 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
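These entries gate each workaround on part and silicon revision through the _ANOMALY_BF526/_ANOMALY_BF527 helpers defined near the top of this header. A rough sketch of what those helpers expand to (an assumption for illustration; the authoritative definitions are in the unmodified part of mach-bf527/include/mach/anomaly.h):

/* Assumed shape of the helper macros used above: a workaround is active
 * only when building for the matching part and its silicon revision
 * satisfies the comparison passed in (e.g. "< 2"). */
#define _ANOMALY_BF526(rev526) \
        (ANOMALY_BF526 && __SILICON_REVISION__ rev526)
#define _ANOMALY_BF527(rev527) \
        (ANOMALY_BF527 && __SILICON_REVISION__ rev527)
#define _ANOMALY_BF526_BF527(rev526, rev527) \
        (_ANOMALY_BF526(rev526) || _ANOMALY_BF527(rev527))

Read that way, changing ANOMALY_05000330 from a bare __SILICON_REVISION__ test to _ANOMALY_BF527(< 2) restricts that workaround to BF527 parts, and narrowing ANOMALY_05000411 to _ANOMALY_BF526(< 1) drops it entirely on BF527.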
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index d4bfcea56828..eb325ed6607e 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -159,22 +159,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
@@ -195,24 +179,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 4, /* actual baudrate is SCLK/(2xspeed_hz) */
- .bus_num = 1, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 16,
.bus_num = 1,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 87b5af3693c1..b0ec825fb4ec 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -102,21 +102,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -151,7 +142,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 7,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 4d5604eaa7c2..14f54a31e74c 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -59,29 +59,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-/* SPI ADC chip */
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -99,24 +82,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 2, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index b67b91d82242..ecd2801f050d 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -210,29 +210,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -250,24 +227,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -276,7 +241,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index a377d8afea03..fbee77fa9211 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -110,7 +110,6 @@ static struct platform_device dm9000_device2 = {
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0, /* if 1 - block!!! */
- .bits_per_word = 8,
};
#endif
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 43224ef00b8c..964a8e5f79b4 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -80,6 +80,9 @@ static struct resource net2272_bfin_resources[] = {
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
+ .start = 1,
+ .flags = IORESOURCE_BUS,
+ }, {
.start = IRQ_PF10,
.end = IRQ_PF10,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -172,29 +175,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -221,7 +201,6 @@ static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
.pio_interrupt = 0,
};
#endif
@@ -240,17 +219,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
@@ -258,7 +226,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836", /* only includes chip name for the moment */
- .controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -269,7 +236,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -659,6 +625,41 @@ static struct platform_device *stamp_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ /* Set PF0 to 0 and PF1 to 1 to make /AMS3 work properly */
+ ret = gpio_request(GPIO_PF0, "net2272");
+ if (ret)
+ return ret;
+
+ ret = gpio_request(GPIO_PF1, "net2272");
+ if (ret) {
+ gpio_free(GPIO_PF0);
+ return ret;
+ }
+
+ ret = gpio_request(GPIO_PF11, "net2272");
+ if (ret) {
+ gpio_free(GPIO_PF0);
+ gpio_free(GPIO_PF1);
+ return ret;
+ }
+
+ gpio_direction_output(GPIO_PF0, 0);
+ gpio_direction_output(GPIO_PF1, 1);
+
+ /* Reset the USB chip */
+ gpio_direction_output(GPIO_PF11, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PF11, 1);
+#endif
+
+ return 0;
+}
+
static int __init stamp_init(void)
{
int ret;
@@ -685,6 +686,9 @@ static int __init stamp_init(void)
}
#endif
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
return 0;
}
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 72aa59440f82..03f2b40912a3 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -11,7 +11,7 @@
*/
/* This file should be up to date with:
- * - Revision F, 05/25/2010; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
+ * - Revision G, 05/23/2011; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -152,7 +152,7 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 6)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 6)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
+/* False Hardware Error when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 6)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 6)
@@ -210,18 +210,25 @@
#define ANOMALY_05000462 (1)
/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
#define ANOMALY_05000471 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
-/* These anomalies have been "phased" out of analog.com anomaly sheets and are
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
* here to show running on older silicon just isn't feasible.
*/
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index d582b810e7a7..44fd8409db10 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -61,29 +61,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -101,24 +84,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
@@ -766,6 +737,24 @@ static struct platform_device *cm_bf537e_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PG14, "net2272");
+ if (ret)
+ return ret;
+
+ /* Reset USB Chip, PG14 */
+ gpio_direction_output(GPIO_PG14, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PG14, 1);
+#endif
+
+ return 0;
+}
+
static int __init cm_bf537e_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -777,6 +766,10 @@ static int __init cm_bf537e_init(void)
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
+
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index cbb8098604c5..1b4ac5c64aae 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -62,29 +62,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -102,24 +85,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
@@ -731,6 +702,36 @@ static struct platform_device *cm_bf537u_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PH15, driver_name);
+ if (ret)
+ return ret;
+
+ ret = gpio_request(GPIO_PH13, "net2272");
+ if (ret) {
+ gpio_free(GPIO_PH15);
+ return ret;
+ }
+
+ /* Set PH15 low to make /AMS2 work properly */
+ gpio_direction_output(GPIO_PH15, 0);
+
+ /* enable CLKBUF output */
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
+
+ /* Reset the USB chip */
+ gpio_direction_output(GPIO_PH13, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PH13, 1);
+#endif
+
+ return 0;
+}
+
static int __init cm_bf537u_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -742,6 +743,10 @@ static int __init cm_bf537u_init(void)
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
+
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
index 6b4ff4605bff..8bc951de979d 100644
--- a/arch/blackfin/mach-bf537/boards/dnp5370.c
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -130,7 +130,6 @@ static struct platform_device asmb_flash_device = {
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0, /* use no dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
@@ -161,7 +160,6 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
.enable_dma = 0, /* use no dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index bfb3671a78da..c62f9dccd9f7 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -159,14 +159,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 9389f03e3b0a..3b8151d99b9a 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -184,40 +184,16 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -248,18 +224,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
@@ -267,7 +231,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -288,7 +251,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 76db1d483173..b52e6728f64f 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -367,6 +367,9 @@ static struct resource net2272_bfin_resources[] = {
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
+ .start = 1,
+ .flags = IORESOURCE_BUS,
+ }, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
@@ -533,49 +536,11 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD193X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
-static struct bfin5xx_spi_chip ad1938_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) \
- || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
-static struct bfin5xx_spi_chip adav801_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
#include <linux/input/ad714x.h>
-static struct bfin5xx_spi_chip ad7147_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
static struct ad714x_slider_plat ad7147_spi_slider_plat[] = {
{
@@ -685,7 +650,6 @@ static struct ad714x_platform_data ad7142_i2c_platform_data = {
#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 16,
};
#endif
@@ -697,7 +661,6 @@ static unsigned short ad2s120x_platform_data[] = {
static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 16,
};
#endif
@@ -714,14 +677,12 @@ static unsigned short ad2s1210_platform_data[] = {
static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
static struct bfin5xx_spi_chip ad7314_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 16,
};
#endif
@@ -735,7 +696,6 @@ static unsigned short ad7816_platform_data[] = {
static struct bfin5xx_spi_chip ad7816_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -749,7 +709,6 @@ static unsigned long adt7310_platform_data[3] = {
static struct bfin5xx_spi_chip adt7310_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -758,11 +717,6 @@ static unsigned short ad7298_platform_data[] = {
GPIO_PF7, /* busy_pin */
0,
};
-
-static struct bfin5xx_spi_chip ad7298_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
#endif
#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
@@ -773,7 +727,6 @@ static unsigned long adt7316_spi_data[2] = {
static struct bfin5xx_spi_chip adt7316_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -800,18 +753,12 @@ static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
.pio_interrupt = 0,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
#include <linux/spi/ad7877.h>
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -883,39 +830,13 @@ static const struct adxl34x_platform_data adxl34x_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
.enable_dma = 1,
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
-static struct bfin5xx_spi_chip adf7021_spi_chip_info = {
- .bits_per_word = 16,
-};
-
#include <linux/spi/adf702x.h>
#define TXREG 0x0160A470
static const u32 adf7021_regs[] = {
@@ -959,10 +880,6 @@ static inline void adf702x_mac_init(void) {}
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
#include <linux/spi/ads7846.h>
-static struct bfin5xx_spi_chip ad7873_spi_chip_info = {
- .bits_per_word = 8,
-};
-
static int ads7873_get_pendown_state(void)
{
return gpio_get_value(GPIO_PF6);
@@ -1009,21 +926,12 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
/* DataFlash chip */
static struct bfin5xx_spi_chip data_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
- .enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_AD7476) || defined(CONFIG_AD7476_MODULE)
static struct bfin5xx_spi_chip spi_ad7476_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
@@ -1053,17 +961,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.mode = SPI_MODE_3,
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) \
- || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
@@ -1073,7 +970,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836", /* only includes chip name for the moment */
- .controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1084,7 +980,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
- .controller_data = &ad1938_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1095,7 +990,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &adav801_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1109,7 +1003,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.chip_select = 5,
.mode = SPI_MODE_3,
.platform_data = &ad7147_spi_platform_data,
- .controller_data = &ad7147_spi_chip_info,
},
#endif
@@ -1188,7 +1081,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad7298_platform_data,
- .controller_data = &ad7298_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1225,7 +1117,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
@@ -1236,7 +1127,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -1246,7 +1136,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
@@ -1255,7 +1144,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -1278,7 +1166,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_adxl34x_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -1288,7 +1175,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
- .controller_data = &adf7021_spi_chip_info,
.platform_data = &adf7021_platform_data,
.mode = SPI_MODE_0,
},
@@ -1300,7 +1186,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.irq = IRQ_PF6,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
- .controller_data = &ad7873_spi_chip_info,
.platform_data = &ad7873_pdata,
.mode = SPI_MODE_0,
},
@@ -2632,9 +2517,25 @@ static struct resource bfin_snd_resources[][4] = {
BFIN_SND_RES(0),
BFIN_SND_RES(1),
};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+static struct platform_device bfin_i2s_pcm = {
+ .name = "bfin-i2s-pcm-audio",
+ .id = -1,
+};
+#endif
-static struct platform_device bfin_pcm = {
- .name = "bfin-pcm-audio",
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+static struct platform_device bfin_tdm_pcm = {
+ .name = "bfin-tdm-pcm-audio",
+ .id = -1,
+};
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+static struct platform_device bfin_ac97_pcm = {
+ .name = "bfin-ac97-pcm-audio",
.id = -1,
};
#endif
@@ -2869,10 +2770,16 @@ static struct platform_device *stamp_devices[] __initdata = {
&stamp_flash_device,
#endif
-#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
- defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
- defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
- &bfin_pcm,
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
+ &bfin_i2s_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
+ &bfin_tdm_pcm,
+#endif
+
+#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
+ &bfin_ac97_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
@@ -2916,6 +2823,24 @@ static struct platform_device *stamp_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PF6, "net2272");
+ if (ret)
+ return ret;
+
+ /* Reset the USB chip */
+ gpio_direction_output(GPIO_PF6, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PF6, 1);
+#endif
+
+ return 0;
+}
+
static int __init stamp_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -2926,6 +2851,9 @@ static int __init stamp_init(void)
ARRAY_SIZE(bfin_i2c_board_info));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
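
Throughout this patch, per-device struct bfin5xx_spi_chip instances whose only payload was bits_per_word are deleted, together with the .controller_data pointers that referenced them, apparently because the word size is now handled through the normal spi->bits_per_word mechanism (set by the peripheral driver, with the SPI core's 8-bit default as fallback) rather than board-specific controller data. A minimal sketch of what a board entry looks like after the cleanup; the device name, speed, and chip select are illustrative, not taken from this patch:

#include <linux/init.h>
#include <linux/spi/spi.h>

/* Sketch only: a post-cleanup board entry. No .controller_data is
 * needed unless a chip really requires controller-specific settings
 * such as DMA. */
static struct spi_board_info example_spi_board_info[] __initdata = {
        {
                .modalias     = "example-spi-dev",
                .max_speed_hz = 5000000,        /* max SPI clock (SCK) in Hz */
                .bus_num      = 0,
                .chip_select  = 1,
                .mode         = SPI_MODE_3,
                /* word size comes from the SPI driver via spi_setup(),
                 * not from a per-board bfin5xx_spi_chip */
        },
};
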
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 164a7e02c022..9b7287abdfa1 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -62,29 +62,12 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -102,24 +85,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
@@ -733,6 +704,24 @@ static struct platform_device *cm_bf537_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PG14, "net2272");
+ if (ret)
+ return ret;
+
+ /* Reset USB Chip, PG14 */
+ gpio_direction_output(GPIO_PG14, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PG14, 1);
+#endif
+
+ return 0;
+}
+
static int __init tcm_bf537_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -744,6 +733,10 @@ static int __init tcm_bf537_init(void)
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
+
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
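
The same net2272_init() helper is added to each affected board, differing only in which GPIO is wired to the NET2272 reset signal (PF6 on the STAMP above, PG14 here, PF11/PF46 on the BF561 boards later in this patch). The sequence is: claim the GPIO, pulse the reset line low for about 2 ms, then release it. A board-agnostic sketch of that pattern; the function name is hypothetical:

#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>

/* Sketch: generic form of the per-board net2272_init() helpers added in
 * this patch. 'gpio' is whichever pin a given board wires to the
 * NET2272 reset line. */
static int __init net2272_reset_pulse(unsigned int gpio)
{
        int ret;

        ret = gpio_request(gpio, "net2272");
        if (ret)
                return ret;

        gpio_direction_output(gpio, 0); /* drive the reset line low */
        mdelay(2);                      /* keep it asserted briefly */
        gpio_set_value(gpio, 1);        /* release the chip from reset */

        return 0;
}
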
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index 7f8e5a9f5db6..543cd3fb305e 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -11,7 +11,7 @@
*/
/* This file should be up to date with:
- * - Revision E, 05/25/2010; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
+ * - Revision F, 05/23/2011; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -44,18 +44,12 @@
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
-#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
#define ANOMALY_05000180 (1)
-/* Instruction Cache Is Not Functional */
-#define ANOMALY_05000237 (__SILICON_REVISION__ < 2)
/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
-/* Buffered CLKIN Output Is Disabled by Default */
-#define ANOMALY_05000247 (1)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ < 3)
/* EMAC TX DMA Error After an Early Frame Abort */
@@ -98,7 +92,7 @@
#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2))
/* SPI Master Boot Mode Does Not Work Well with Atmel Data Flash Devices */
#define ANOMALY_05000280 (1)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
+/* False Hardware Error when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 3)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 3)
@@ -162,9 +156,9 @@
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
#define ANOMALY_05000462 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
@@ -172,8 +166,26 @@
#define ANOMALY_05000480 (__SILICON_REVISION__ < 3)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
+
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
+ * here to show running on older silicon just isn't feasible.
+ */
+
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
+#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
+/* Instruction Cache Is Not Functional */
+#define ANOMALY_05000237 (__SILICON_REVISION__ < 2)
+/* Buffered CLKIN Output Is Disabled by Default */
+#define ANOMALY_05000247 (__SILICON_REVISION__ < 2)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
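
These ANOMALY_xxx macros expand to compile-time 0/1 constants, usually keyed off __SILICON_REVISION__, so workaround code elsewhere in the tree can be written as ordinary C and is discarded by the compiler on silicon that is not affected. A hedged sketch of the usual consumer pattern; the function and workaround body are hypothetical, only the macro comes from this header:

#include <asm/blackfin.h>       /* illustrative include; anomaly.h is
                                 * normally pulled in via the mach headers */

/* Sketch: typical consumer of an anomaly macro. If the macro is a
 * compile-time 0 for the current silicon revision, the whole branch
 * disappears from the object code. */
static void example_modify_pll(void)
{
        if (ANOMALY_05000475) {
                /* hypothetical workaround: ensure the PLL update
                 * sequence does not execute from external memory */
        }

        /* normal PLL programming would follow here */
}
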
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index e61424ef35eb..629f3c333415 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -502,7 +502,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
@@ -523,13 +522,6 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
};
#endif
-#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>
@@ -559,20 +551,6 @@ static struct platform_device bfin_lq035q1_device = {
};
#endif
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
-static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bf538_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -595,7 +573,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -605,7 +582,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
@@ -615,7 +591,6 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf538/ext-gpio.c b/arch/blackfin/mach-bf538/ext-gpio.c
index 180b1252679f..471a9b184d5b 100644
--- a/arch/blackfin/mach-bf538/ext-gpio.c
+++ b/arch/blackfin/mach-bf538/ext-gpio.c
@@ -1,7 +1,7 @@
/*
* GPIOLIB interface for BF538/9 PORT C, D, and E GPIOs
*
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -121,3 +121,38 @@ static int __init bf538_extgpio_setup(void)
gpiochip_add(&bf538_porte_chip);
}
arch_initcall(bf538_extgpio_setup);
+
+#ifdef CONFIG_PM
+static struct {
+ u16 data, dir, inen;
+} gpio_bank_saved[3];
+
+static void __iomem * const port_bases[3] = {
+ (void *)PORTCIO,
+ (void *)PORTDIO,
+ (void *)PORTEIO,
+};
+
+void bfin_special_gpio_pm_hibernate_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_bases); ++i) {
+ gpio_bank_saved[i].data = read_PORTIO(port_bases[i]);
+ gpio_bank_saved[i].inen = read_PORTIO_INEN(port_bases[i]);
+ gpio_bank_saved[i].dir = read_PORTIO_DIR(port_bases[i]);
+ }
+}
+
+void bfin_special_gpio_pm_hibernate_restore(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_bases); ++i) {
+ write_PORTIO_INEN(port_bases[i], gpio_bank_saved[i].inen);
+ write_PORTIO_SET(port_bases[i],
+ gpio_bank_saved[i].data & gpio_bank_saved[i].dir);
+ write_PORTIO_DIR(port_bases[i], gpio_bank_saved[i].dir);
+ }
+}
+#endif
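
The restore path above is ordered deliberately: input enables come back first, then the saved data is written through the SET register masked with the saved direction, and only then is the direction register restored, so a pin that is about to become an output already carries its saved level when it starts driving. (Only the 1 bits need setting, presumably because the data registers come back as zero after hibernate.) The same ordering for a single bank, condensed into a sketch that reuses the accessors from the code above:

/* Sketch: restore ordering for one port; the u16 values are whatever
 * was captured at suspend time. */
static void example_restore_bank(void __iomem *port, u16 data, u16 dir, u16 inen)
{
        write_PORTIO_INEN(port, inen);          /* 1: re-enable inputs */
        write_PORTIO_SET(port, data & dir);     /* 2: preload levels of output pins */
        write_PORTIO_DIR(port, dir);            /* 3: start driving outputs last */
}
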
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 55e7d0712a94..b6ca99788710 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -11,8 +11,8 @@
*/
/* This file should be up to date with:
- * - Revision I, 05/25/2010; ADSP-BF538/BF538F Blackfin Processor Anomaly List
- * - Revision N, 05/25/2010; ADSP-BF539/BF539F Blackfin Processor Anomaly List
+ * - Revision J, 05/23/2011; ADSP-BF538/BF538F Blackfin Processor Anomaly List
+ * - Revision O, 05/23/2011; ADSP-BF539/BF539F Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -56,25 +56,21 @@
#define ANOMALY_05000229 (1)
/* PPI_FS3 Is Not Driven in 2 or 3 Internal Frame Sync Transmit Modes */
#define ANOMALY_05000233 (1)
-/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
-#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
/* Maximum External Clock Speed for Timers */
#define ANOMALY_05000253 (1)
-/* DCPLB_FAULT_ADDR MMR Register May Be Corrupted */
-#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
#define ANOMALY_05000270 (__SILICON_REVISION__ < 4)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
-#define ANOMALY_05000272 (1)
+#define ANOMALY_05000272 (ANOMALY_BF538)
/* Writes to Synchronous SDRAM Memory May Be Lost */
#define ANOMALY_05000273 (__SILICON_REVISION__ < 4)
/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
#define ANOMALY_05000277 (__SILICON_REVISION__ < 4)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 4)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
+/* False Hardware Error when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 4)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 4)
@@ -102,8 +98,10 @@
#define ANOMALY_05000313 (__SILICON_REVISION__ < 4)
/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 4)
+/* PFx Glitch on Write to PORTFIO or PORTFIO_TOGGLE */
+#define ANOMALY_05000317 (__SILICON_REVISION__ < 4) /* XXX: Same as 05000318 */
/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */
-#define ANOMALY_05000318 (ANOMALY_BF539 && __SILICON_REVISION__ < 4)
+#define ANOMALY_05000318 (__SILICON_REVISION__ < 4) /* XXX: Same as 05000317 */
/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
#define ANOMALY_05000355 (__SILICON_REVISION__ < 5)
/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
@@ -134,16 +132,32 @@
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
#define ANOMALY_05000462 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
+
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
+ * here to show running on older silicon just isn't feasible.
+ */
+
+/* If I-Cache Is On, CSYNC/SSYNC/IDLE Around Change of Control Causes Failures */
+#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
+/* DCPLB_FAULT_ADDR MMR Register May Be Corrupted */
+#define ANOMALY_05000261 (__SILICON_REVISION__ < 3)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
index 8a5beeece996..3561c7d8935b 100644
--- a/arch/blackfin/mach-bf538/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -8,7 +8,10 @@
#define _MACH_GPIO_H_
#define MAX_BLACKFIN_GPIOS 16
+#ifdef CONFIG_GPIOLIB
+/* We only use the special logic with GPIOLIB devices */
#define BFIN_SPECIAL_GPIO_BANKS 3
+#endif
#define GPIO_PF0 0 /* PF */
#define GPIO_PF1 1
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index d11502ac5623..212b9e0a08c8 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -861,16 +861,10 @@ static struct flash_platform_data bfin_spi_flash_data = {
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -886,13 +880,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bf54x_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -915,7 +902,6 @@ static struct spi_board_info bf54x_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -924,7 +910,6 @@ static struct spi_board_info bf54x_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 311bf9970fe7..cd9cbb68de69 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -1018,24 +1018,10 @@ static struct flash_platform_data bfin_spi_flash_data = {
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
-static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
@@ -1051,20 +1037,6 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
-static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
- .enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -1086,7 +1058,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 1,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
@@ -1097,7 +1068,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
- .controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -1106,7 +1076,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
@@ -1117,7 +1086,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 1,
.chip_select = 2,
- .controller_data = &spi_adxl34x_chip_info,
.mode = SPI_MODE_3,
},
#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 9e70785bdde3..ac96ee83b00e 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -11,7 +11,7 @@
*/
/* This file should be up to date with:
- * - Revision J, 06/03/2010; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
+ * - Revision K, 05/23/2011; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -29,117 +29,37 @@
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
/* Data Corruption/Core Hang with L2/L3 Configured in Writeback Cache Mode */
-#define ANOMALY_05000220 (1)
+#define ANOMALY_05000220 (__SILICON_REVISION__ < 4)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (1)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
#define ANOMALY_05000272 (1)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
-#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
-/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
-#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
-#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
-/* TWI Slave Boot Mode Is Not Functional */
-#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
/* FIFO Boot Mode Not Functional */
#define ANOMALY_05000325 (__SILICON_REVISION__ < 2)
-/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
-#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
-/* Incorrect Access of OTP_STATUS During otp_write() Function */
-#define ANOMALY_05000328 (__SILICON_REVISION__ < 1)
-/* Synchronous Burst Flash Boot Mode Is Not Functional */
-#define ANOMALY_05000329 (__SILICON_REVISION__ < 1)
-/* Host DMA Boot Modes Are Not Functional */
-#define ANOMALY_05000330 (__SILICON_REVISION__ < 1)
-/* Inadequate Timing Margins on DDR DQS to DQ and DQM Skew */
-#define ANOMALY_05000334 (__SILICON_REVISION__ < 1)
-/* Inadequate Rotary Debounce Logic Duration */
-#define ANOMALY_05000335 (__SILICON_REVISION__ < 1)
-/* Phantom Interrupt Occurs After First Configuration of Host DMA Port */
-#define ANOMALY_05000336 (__SILICON_REVISION__ < 1)
-/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
-#define ANOMALY_05000337 (__SILICON_REVISION__ < 1)
-/* Slave-Mode SPI0 MISO Failure With CPHA = 0 */
-#define ANOMALY_05000338 (__SILICON_REVISION__ < 1)
-/* If Memory Reads Are Enabled on SDH or HOSTDP, Other DMAC1 Peripherals Cannot Read */
-#define ANOMALY_05000340 (__SILICON_REVISION__ < 1)
-/* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals Are Swapped */
-#define ANOMALY_05000344 (__SILICON_REVISION__ < 1)
-/* USB Calibration Value Is Not Initialized */
-#define ANOMALY_05000346 (__SILICON_REVISION__ < 1)
-/* USB Calibration Value to use */
-#define ANOMALY_05000346_value 0x5411
-/* Preboot Routine Incorrectly Alters Reset Value of USB Register */
-#define ANOMALY_05000347 (__SILICON_REVISION__ < 1)
-/* Data Lost when Core Reads SDH Data FIFO */
-#define ANOMALY_05000349 (__SILICON_REVISION__ < 1)
-/* PLL Status Register Is Inaccurate */
-#define ANOMALY_05000351 (__SILICON_REVISION__ < 1)
/* bfrom_SysControl() Firmware Function Performs Improper System Reset */
/*
* Note: anomaly sheet says this is fixed with bf54x-0.2+, but testing
* shows that the fix itself does not cover all cases.
*/
#define ANOMALY_05000353 (1)
-/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
-#define ANOMALY_05000355 (__SILICON_REVISION__ < 1)
-/* System Stalled During A Core Access To AMC While A Core Access To NFC FIFO Is Required */
-#define ANOMALY_05000356 (__SILICON_REVISION__ < 1)
/* Serial Port (SPORT) Multichannel Transmit Failure when Channel 0 Is Disabled */
#define ANOMALY_05000357 (1)
/* External Memory Read Access Hangs Core With PLL Bypass */
#define ANOMALY_05000360 (1)
/* DMAs that Go Urgent during Tight Core Writes to External Memory Are Blocked */
#define ANOMALY_05000365 (1)
-/* WURESET Bit In SYSCR Register Does Not Properly Indicate Hibernate Wake-Up */
-#define ANOMALY_05000367 (__SILICON_REVISION__ < 1)
/* Addressing Conflict between Boot ROM and Asynchronous Memory */
#define ANOMALY_05000369 (1)
-/* Default PLL MSEL and SSEL Settings Can Cause 400MHz Product To Violate Specifications */
-#define ANOMALY_05000370 (__SILICON_REVISION__ < 1)
/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */
#define ANOMALY_05000371 (__SILICON_REVISION__ < 2)
-/* USB DP/DM Data Pins May Lose State When Entering Hibernate */
-#define ANOMALY_05000372 (__SILICON_REVISION__ < 1)
/* Security/Authentication Speedpath Causes Authentication To Fail To Initiate */
#define ANOMALY_05000378 (__SILICON_REVISION__ < 2)
/* 16-Bit NAND FLASH Boot Mode Is Not Functional */
#define ANOMALY_05000379 (1)
-/* 8-Bit NAND Flash Boot Mode Not Functional */
-#define ANOMALY_05000382 (__SILICON_REVISION__ < 1)
-/* Some ATAPI Modes Are Not Functional */
-#define ANOMALY_05000383 (1)
-/* Boot from OTP Memory Not Functional */
-#define ANOMALY_05000385 (__SILICON_REVISION__ < 1)
-/* bfrom_SysControl() Firmware Routine Not Functional */
-#define ANOMALY_05000386 (__SILICON_REVISION__ < 1)
-/* Programmable Preboot Settings Not Functional */
-#define ANOMALY_05000387 (__SILICON_REVISION__ < 1)
-/* CRC32 Checksum Support Not Functional */
-#define ANOMALY_05000388 (__SILICON_REVISION__ < 1)
-/* Reset Vector Must Not Be in SDRAM Memory Space */
-#define ANOMALY_05000389 (__SILICON_REVISION__ < 1)
-/* Changed Meaning of BCODE Field in SYSCR Register */
-#define ANOMALY_05000390 (__SILICON_REVISION__ < 1)
-/* Repeated Boot from Page-Mode or Burst-Mode Flash Memory May Fail */
-#define ANOMALY_05000391 (__SILICON_REVISION__ < 1)
-/* pTempCurrent Not Present in ADI_BOOT_DATA Structure */
-#define ANOMALY_05000392 (__SILICON_REVISION__ < 1)
-/* Deprecated Value of dTempByteCount in ADI_BOOT_DATA Structure */
-#define ANOMALY_05000393 (__SILICON_REVISION__ < 1)
-/* Log Buffer Not Functional */
-#define ANOMALY_05000394 (__SILICON_REVISION__ < 1)
-/* Hook Routine Not Functional */
-#define ANOMALY_05000395 (__SILICON_REVISION__ < 1)
-/* Header Indirect Bit Not Functional */
-#define ANOMALY_05000396 (__SILICON_REVISION__ < 1)
-/* BK_ONES, BK_ZEROS, and BK_DATECODE Constants Not Functional */
-#define ANOMALY_05000397 (__SILICON_REVISION__ < 1)
/* Lockbox SESR Disallows Certain User Interrupts */
#define ANOMALY_05000404 (__SILICON_REVISION__ < 2)
/* Lockbox SESR Firmware Does Not Save/Restore Full Context */
@@ -161,7 +81,7 @@
/* Speculative Fetches Can Cause Undesired External FIFO Operations */
#define ANOMALY_05000416 (1)
/* Multichannel SPORT Channel Misalignment Under Specific Configuration */
-#define ANOMALY_05000425 (1)
+#define ANOMALY_05000425 (__SILICON_REVISION__ < 4)
/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
#define ANOMALY_05000426 (1)
/* CORE_EPPI_PRIO bit and SYS_EPPI_PRIO bit in the HMDMA1_CONTROL register are not functional */
@@ -174,8 +94,6 @@
#define ANOMALY_05000431 (__SILICON_REVISION__ < 3)
/* SW Breakpoints Ignored Upon Return From Lockbox Authentication */
#define ANOMALY_05000434 (1)
-/* OTP Write Accesses Not Supported */
-#define ANOMALY_05000442 (__SILICON_REVISION__ < 1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
/* CDMAPRIO and L2DMAPRIO Bits in the SYSCR Register Are Not Functional */
@@ -186,34 +104,32 @@
#define ANOMALY_05000448 (__SILICON_REVISION__ == 1)
/* Reduced Timing Margins on DDR Output Setup and Hold (tDS and tDH) */
#define ANOMALY_05000449 (__SILICON_REVISION__ == 1)
-/* USB DMA Mode 1 Short Packet Data Corruption */
+/* USB DMA Short Packet Data Corruption */
#define ANOMALY_05000450 (1)
-/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
-#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
#define ANOMALY_05000456 (1)
/* Host DMA Port Responds to Certain Bus Activity Without HOST_CE Assertion */
#define ANOMALY_05000457 (1)
/* USB DMA Mode 1 Failure When Multiple USB DMA Channels Are Concurrently Enabled */
-#define ANOMALY_05000460 (1)
+#define ANOMALY_05000460 (__SILICON_REVISION__ < 4)
/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
-#define ANOMALY_05000462 (1)
+#define ANOMALY_05000462 (__SILICON_REVISION__ < 4)
/* USB DMA RX Data Corruption */
-#define ANOMALY_05000463 (1)
+#define ANOMALY_05000463 (__SILICON_REVISION__ < 4)
/* USB TX DMA Hang */
-#define ANOMALY_05000464 (1)
-/* USB Rx DMA hang */
+#define ANOMALY_05000464 (__SILICON_REVISION__ < 4)
+/* USB Rx DMA Hang */
#define ANOMALY_05000465 (1)
/* TxPktRdy Bit Not Set for Transmit Endpoint When Core and DMA Access USB Endpoint FIFOs Simultaneously */
-#define ANOMALY_05000466 (1)
-/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
-#define ANOMALY_05000467 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000466 (__SILICON_REVISION__ < 4)
+/* Possible USB RX Data Corruption When Control & Data EP FIFOs are Accessed via the Core */
+#define ANOMALY_05000467 (__SILICON_REVISION__ < 4)
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Access to DDR-SDRAM causes system hang under certain PLL/VR settings */
-#define ANOMALY_05000474 (1)
+/* Access to DDR SDRAM Causes System Hang with Certain PLL Settings */
+#define ANOMALY_05000474 (__SILICON_REVISION__ < 4)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
@@ -223,9 +139,111 @@
/* DDR Trim May Not Be Performed for Certain VLEV Values in OTP Page PBS00L */
#define ANOMALY_05000484 (__SILICON_REVISION__ < 3)
/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
-#define ANOMALY_05000485 (__SILICON_REVISION__ >= 2)
-/* IFLUSH sucks at life */
+#define ANOMALY_05000485 (__SILICON_REVISION__ > 1 && __SILICON_REVISION__ < 4)
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* SPI Master Boot Can Fail Under Certain Conditions */
+#define ANOMALY_05000490 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* CNT_COMMAND Functionality Depends on CNT_IMASK Configuration */
+#define ANOMALY_05000498 (1)
+/* Nand Flash Controller Hangs When the AMC Requests the Async Pins During the last 16 Bytes of a Page Write Operation. */
+#define ANOMALY_05000500 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
+/* Async Memory Writes May Be Skipped When Using Odd Clock Ratios */
+#define ANOMALY_05000502 (1)
+
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
+ * here to show running on older silicon just isn't feasible.
+ */
+
+/* False Hardware Error when ISR Context Is Not Restored */
+#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
+/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
+#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
+/* TWI Slave Boot Mode Is Not Functional */
+#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
+/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
+#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
+/* Incorrect Access of OTP_STATUS During otp_write() Function */
+#define ANOMALY_05000328 (__SILICON_REVISION__ < 1)
+/* Synchronous Burst Flash Boot Mode Is Not Functional */
+#define ANOMALY_05000329 (__SILICON_REVISION__ < 1)
+/* Host DMA Boot Modes Are Not Functional */
+#define ANOMALY_05000330 (__SILICON_REVISION__ < 1)
+/* Inadequate Timing Margins on DDR DQS to DQ and DQM Skew */
+#define ANOMALY_05000334 (__SILICON_REVISION__ < 1)
+/* Inadequate Rotary Debounce Logic Duration */
+#define ANOMALY_05000335 (__SILICON_REVISION__ < 1)
+/* Phantom Interrupt Occurs After First Configuration of Host DMA Port */
+#define ANOMALY_05000336 (__SILICON_REVISION__ < 1)
+/* Disallowed Configuration Prevents Subsequent Allowed Configuration on Host DMA Port */
+#define ANOMALY_05000337 (__SILICON_REVISION__ < 1)
+/* Slave-Mode SPI0 MISO Failure With CPHA = 0 */
+#define ANOMALY_05000338 (__SILICON_REVISION__ < 1)
+/* If Memory Reads Are Enabled on SDH or HOSTDP, Other DMAC1 Peripherals Cannot Read */
+#define ANOMALY_05000340 (__SILICON_REVISION__ < 1)
+/* Boot Host Wait (HWAIT) and Boot Host Wait Alternate (HWAITA) Signals Are Swapped */
+#define ANOMALY_05000344 (__SILICON_REVISION__ < 1)
+/* USB Calibration Value Is Not Initialized */
+#define ANOMALY_05000346 (__SILICON_REVISION__ < 1)
+/* USB Calibration Value to use */
+#define ANOMALY_05000346_value 0x5411
+/* Preboot Routine Incorrectly Alters Reset Value of USB Register */
+#define ANOMALY_05000347 (__SILICON_REVISION__ < 1)
+/* Data Lost when Core Reads SDH Data FIFO */
+#define ANOMALY_05000349 (__SILICON_REVISION__ < 1)
+/* PLL Status Register Is Inaccurate */
+#define ANOMALY_05000351 (__SILICON_REVISION__ < 1)
+/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
+#define ANOMALY_05000355 (__SILICON_REVISION__ < 1)
+/* System Stalled During A Core Access To AMC While A Core Access To NFC FIFO Is Required */
+#define ANOMALY_05000356 (__SILICON_REVISION__ < 1)
+/* WURESET Bit In SYSCR Register Does Not Properly Indicate Hibernate Wake-Up */
+#define ANOMALY_05000367 (__SILICON_REVISION__ < 1)
+/* Default PLL MSEL and SSEL Settings Can Cause 400MHz Product To Violate Specifications */
+#define ANOMALY_05000370 (__SILICON_REVISION__ < 1)
+/* USB DP/DM Data Pins May Lose State When Entering Hibernate */
+#define ANOMALY_05000372 (__SILICON_REVISION__ < 1)
+/* 8-Bit NAND Flash Boot Mode Not Functional */
+#define ANOMALY_05000382 (__SILICON_REVISION__ < 1)
+/* Boot from OTP Memory Not Functional */
+#define ANOMALY_05000385 (__SILICON_REVISION__ < 1)
+/* bfrom_SysControl() Firmware Routine Not Functional */
+#define ANOMALY_05000386 (__SILICON_REVISION__ < 1)
+/* Programmable Preboot Settings Not Functional */
+#define ANOMALY_05000387 (__SILICON_REVISION__ < 1)
+/* CRC32 Checksum Support Not Functional */
+#define ANOMALY_05000388 (__SILICON_REVISION__ < 1)
+/* Reset Vector Must Not Be in SDRAM Memory Space */
+#define ANOMALY_05000389 (__SILICON_REVISION__ < 1)
+/* Changed Meaning of BCODE Field in SYSCR Register */
+#define ANOMALY_05000390 (__SILICON_REVISION__ < 1)
+/* Repeated Boot from Page-Mode or Burst-Mode Flash Memory May Fail */
+#define ANOMALY_05000391 (__SILICON_REVISION__ < 1)
+/* pTempCurrent Not Present in ADI_BOOT_DATA Structure */
+#define ANOMALY_05000392 (__SILICON_REVISION__ < 1)
+/* Deprecated Value of dTempByteCount in ADI_BOOT_DATA Structure */
+#define ANOMALY_05000393 (__SILICON_REVISION__ < 1)
+/* Log Buffer Not Functional */
+#define ANOMALY_05000394 (__SILICON_REVISION__ < 1)
+/* Hook Routine Not Functional */
+#define ANOMALY_05000395 (__SILICON_REVISION__ < 1)
+/* Header Indirect Bit Not Functional */
+#define ANOMALY_05000396 (__SILICON_REVISION__ < 1)
+/* BK_ONES, BK_ZEROS, and BK_DATECODE Constants Not Functional */
+#define ANOMALY_05000397 (__SILICON_REVISION__ < 1)
+/* OTP Write Accesses Not Supported */
+#define ANOMALY_05000442 (__SILICON_REVISION__ < 1)
+/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
+#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index 7db433514e3f..35c8ced46158 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -170,6 +170,8 @@
#define MAX_BLACKFIN_GPIOS 160
+#define BFIN_GPIO_PINT 1
+
#ifndef __ASSEMBLY__
struct gpio_port_t {
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 533b8095b540..10dc142c518d 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -438,7 +438,7 @@
struct bfin_pint_regs {
u32 mask_set;
u32 mask_clear;
- u32 irq;
+ u32 request;
u32 assign;
u32 edge_set;
u32 edge_clear;
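
The irq field of struct bfin_pint_regs is renamed request, matching the PINTx_REQUEST register it maps: the latched interrupt requests a demux handler inspects to find pending pins. A hedged sketch of how that field might be read; the handler, base pointer, and dispatch step are illustrative, not part of this patch:

#include <linux/types.h>
#include <linux/bitops.h>

/* Sketch: reading the renamed 'request' field in a PINT demultiplexer.
 * 'pint' would point at one of the memory-mapped PINT register blocks;
 * everything around the register access is hypothetical. */
static void example_pint_demux(volatile struct bfin_pint_regs *pint)
{
        u32 pending = pint->request;    /* latched pin-interrupt requests */

        while (pending) {
                unsigned int bit = __ffs(pending);

                pending &= ~(1u << bit);
                /* a real handler would map 'bit' to its GPIO IRQ and
                 * invoke the corresponding handler here */
        }
}
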
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 9231a942892b..972e1347c6bc 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -364,14 +364,6 @@ static struct flash_platform_data bfin_spi_dataflash_data = {
/* DataFlash chip */
static struct bfin5xx_spi_chip data_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip */
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -420,7 +412,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 3,
- .controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 87595cd38afe..e4f397d1d65b 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -60,29 +60,6 @@ static struct flash_platform_data bfin_spi_flash_data = {
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
- .bits_per_word = 8,
-};
-#endif
-
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
-/* SPI ADC chip */
-static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .enable_dma = 1, /* use dma transfer with this chip*/
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
-static struct bfin5xx_spi_chip mmc_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
};
#endif
@@ -100,24 +77,12 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
- {
- .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
- .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
- .bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. */
- .platform_data = NULL, /* No spi_driver specific config */
- .controller_data = &spi_adc_chip_info,
- },
-#endif
-
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
- .controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -126,7 +91,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -532,6 +496,24 @@ static struct platform_device *cm_bf561_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PF46, "net2272");
+ if (ret)
+ return ret;
+
+ /* Reset USB Chip, PF46 */
+ gpio_direction_output(GPIO_PF46, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PF46, 1);
+#endif
+
+ return 0;
+}
+
static int __init cm_bf561_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
@@ -543,6 +525,10 @@ static int __init cm_bf561_init(void)
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
#endif
+
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
return 0;
}
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 5067984a62e7..9490dc800ca5 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -108,6 +108,9 @@ static struct resource net2272_bfin_resources[] = {
.end = 0x2C000000 + 0x7F,
.flags = IORESOURCE_MEM,
}, {
+ .start = 1,
+ .flags = IORESOURCE_BUS,
+ }, {
.start = IRQ_PF10,
.end = IRQ_PF10,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
@@ -283,21 +286,6 @@ static struct platform_device ezkit_flash_device = {
};
#endif
-#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
- || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
-static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 16,
-};
-#endif
-
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
-static struct bfin5xx_spi_chip spidev_chip_info = {
- .enable_dma = 0,
- .bits_per_word = 8,
-};
-#endif
-
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
@@ -345,7 +333,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836", /* only includes chip name for the moment */
- .controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
@@ -355,7 +342,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
- .controller_data = &spidev_chip_info,
},
#endif
};
@@ -516,6 +502,24 @@ static struct platform_device *ezkit_devices[] __initdata = {
#endif
};
+static int __init net2272_init(void)
+{
+#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
+ int ret;
+
+ ret = gpio_request(GPIO_PF11, "net2272");
+ if (ret)
+ return ret;
+
+ /* Reset the USB chip */
+ gpio_direction_output(GPIO_PF11, 0);
+ mdelay(2);
+ gpio_set_value(GPIO_PF11, 1);
+#endif
+
+ return 0;
+}
+
static int __init ezkit_init(void)
{
int ret;
@@ -542,6 +546,9 @@ static int __init ezkit_init(void)
udelay(400);
#endif
+ if (net2272_init())
+ pr_warning("unable to configure net2272; it probably won't work\n");
+
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
return 0;
}
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 22b5ab773027..836baeed303a 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -11,7 +11,7 @@
*/
/* This file should be up to date with:
- * - Revision R, 05/25/2010; ADSP-BF561 Blackfin Processor Anomaly List
+ * - Revision S, 05/23/2011; ADSP-BF561 Blackfin Processor Anomaly List
*/
#ifndef _MACH_ANOMALY_H_
@@ -26,62 +26,16 @@
#define ANOMALY_05000074 (1)
/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
-/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
-#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
/* TESTSET Instructions Restricted to 32-Bit Aligned Memory Locations */
#define ANOMALY_05000120 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Erroneous Exception when Enabling Cache */
-#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
/* SIGNBITS Instruction Not Functional under Certain Conditions */
#define ANOMALY_05000127 (1)
-/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
-#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
-/* Enable wires from the Data Watchpoint Address Control Register (WPDACTL) are swapped */
-#define ANOMALY_05000135 (__SILICON_REVISION__ < 3)
-/* Stall in multi-unit DMA operations */
-#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
-/* Allowing the SPORT RX FIFO to fill will cause an overflow */
-#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
-/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
-#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
-/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
-#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
-/* DMA and TESTSET conflict when both are accessing external memory */
-#define ANOMALY_05000144 (__SILICON_REVISION__ < 3)
-/* In PWM_OUT mode, you must enable the PPI block to generate a waveform from PPI_CLK */
-#define ANOMALY_05000145 (__SILICON_REVISION__ < 3)
-/* MDMA may lose the first few words of a descriptor chain */
-#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
-/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
-#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
/* IMDMA S1/D1 Channel May Stall */
#define ANOMALY_05000149 (1)
-/* DMA engine may lose data due to incorrect handshaking */
-#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
-/* DMA stalls when all three controllers read data from the same source */
-#define ANOMALY_05000151 (__SILICON_REVISION__ < 3)
-/* Execution stall when executing in L2 and doing external accesses */
-#define ANOMALY_05000152 (__SILICON_REVISION__ < 3)
-/* Frame Delay in SPORT Multichannel Mode */
-#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
-/* SPORT TFS signal stays active in multichannel mode outside of valid channels */
-#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
/* Timers in PWM-Out Mode with PPI GP Receive (Input) Mode with 0 Frame Syncs */
#define ANOMALY_05000156 (__SILICON_REVISION__ < 4)
-/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
-#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
-/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
-#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
-/* A read from external memory may return a wrong value with data cache enabled */
-#define ANOMALY_05000160 (__SILICON_REVISION__ < 3)
-/* Data Cache Fill data can be corrupted after/during Instruction DMA if certain core stalls exist */
-#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
-/* DMEM_CONTROL<12> is not set on Reset */
-#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
-/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
-#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
@@ -92,10 +46,6 @@
#define ANOMALY_05000169 (__SILICON_REVISION__ < 5)
/* Boot-ROM Modifies SICA_IWRx Wakeup Registers */
#define ANOMALY_05000171 (__SILICON_REVISION__ < 5)
-/* DSPID register values incorrect */
-#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
-/* DMA vs Core accesses to external memory */
-#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
/* Cache Fill Buffer Data lost */
#define ANOMALY_05000174 (__SILICON_REVISION__ < 5)
/* Overlapping Sequencer and Memory Stalls */
@@ -124,8 +74,6 @@
#define ANOMALY_05000189 (__SILICON_REVISION__ < 5)
/* PPI Not Functional at Core Voltage < 1Volt */
#define ANOMALY_05000190 (1)
-/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
-#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
/* False I/O Pin Interrupts on Edge-Sensitive Inputs When Polarity Setting Is Changed */
#define ANOMALY_05000193 (__SILICON_REVISION__ < 5)
/* Restarting SPORT in Specific Modes May Cause Data Corruption */
@@ -217,10 +165,10 @@
/* Timing Requirements Change for External Frame Sync PPI Modes with Non-Zero PPI_DELAY */
#define ANOMALY_05000276 (__SILICON_REVISION__ < 5)
/* Writes to an I/O Data Register One SCLK Cycle after an Edge Is Detected May Clear Interrupt */
-#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
+#define ANOMALY_05000277 (__SILICON_REVISION__ < 5)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
-/* False Hardware Error Exception when ISR Context Is Not Restored */
+/* False Hardware Error when ISR Context Is Not Restored */
/* Temporarily walk around for bug 5423 till this issue is confirmed by
* official anomaly document. It looks 05000281 still exists on bf561
* v0.5.
@@ -274,8 +222,6 @@
#define ANOMALY_05000366 (1)
/* Possible RETS Register Corruption when Subroutine Is under 5 Cycles in Duration */
#define ANOMALY_05000371 (1)
-/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
-#define ANOMALY_05000402 (__SILICON_REVISION__ == 4)
/* Level-Sensitive External GPIO Wakeups May Cause Indefinite Stall */
#define ANOMALY_05000403 (1)
/* TESTSET Instruction Causes Data Corruption with Writeback Data Cache Enabled */
@@ -298,16 +244,82 @@
#define ANOMALY_05000462 (1)
/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
#define ANOMALY_05000471 (1)
-/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
#define ANOMALY_05000473 (1)
-/* Possible Lockup Condition whem Modifying PLL from External Memory */
+/* Possible Lockup Condition when Modifying PLL from External Memory */
#define ANOMALY_05000475 (1)
/* TESTSET Instruction Cannot Be Interrupted */
#define ANOMALY_05000477 (1)
/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
+/* PLL May Latch Incorrect Values Coming Out of Reset */
+#define ANOMALY_05000489 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
#define ANOMALY_05000491 (1)
+/* EXCPT Instruction May Be Lost If NMI Happens Simultaneously */
+#define ANOMALY_05000494 (1)
+/* RXS Bit in SPI_STAT May Become Stuck In RX DMA Modes */
+#define ANOMALY_05000501 (1)
+
+/*
+ * These anomalies have been "phased" out of analog.com anomaly sheets and are
+ * here to show running on older silicon just isn't feasible.
+ */
+
+/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
+#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
+/* Erroneous Exception when Enabling Cache */
+#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
+/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
+#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
+/* Enable wires from the Data Watchpoint Address Control Register (WPDACTL) are swapped */
+#define ANOMALY_05000135 (__SILICON_REVISION__ < 3)
+/* Stall in multi-unit DMA operations */
+#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
+/* Allowing the SPORT RX FIFO to fill will cause an overflow */
+#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
+/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
+#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
+/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
+#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
+/* DMA and TESTSET conflict when both are accessing external memory */
+#define ANOMALY_05000144 (__SILICON_REVISION__ < 3)
+/* In PWM_OUT mode, you must enable the PPI block to generate a waveform from PPI_CLK */
+#define ANOMALY_05000145 (__SILICON_REVISION__ < 3)
+/* MDMA may lose the first few words of a descriptor chain */
+#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
+/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
+#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
+/* DMA engine may lose data due to incorrect handshaking */
+#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
+/* DMA stalls when all three controllers read data from the same source */
+#define ANOMALY_05000151 (__SILICON_REVISION__ < 3)
+/* Execution stall when executing in L2 and doing external accesses */
+#define ANOMALY_05000152 (__SILICON_REVISION__ < 3)
+/* Frame Delay in SPORT Multichannel Mode */
+#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
+/* SPORT TFS signal stays active in multichannel mode outside of valid channels */
+#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
+#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
+/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
+#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
+/* A read from external memory may return a wrong value with data cache enabled */
+#define ANOMALY_05000160 (__SILICON_REVISION__ < 3)
+/* Data Cache Fill data can be corrupted after/during Instruction DMA if certain core stalls exist */
+#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
+/* DMEM_CONTROL<12> is not set on Reset */
+#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
+/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
+#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
+/* DSPID register values incorrect */
+#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
+/* DMA vs Core accesses to external memory */
+#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
+/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
+#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
+/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
+#define ANOMALY_05000402 (__SILICON_REVISION__ == 4)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000119 (0)
diff --git a/arch/blackfin/mach-bf561/include/mach/gpio.h b/arch/blackfin/mach-bf561/include/mach/gpio.h
index 57d5eab59faf..f9f8b2adf4ba 100644
--- a/arch/blackfin/mach-bf561/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf561/include/mach/gpio.h
@@ -58,9 +58,9 @@
#define GPIO_PF46 46
#define GPIO_PF47 47
-#define PORT_FIO0 GPIO_0
-#define PORT_FIO1 GPIO_16
-#define PORT_FIO2 GPIO_32
+#define PORT_FIO0 GPIO_PF0
+#define PORT_FIO1 GPIO_PF16
+#define PORT_FIO2 GPIO_PF32
#include <mach-common/ports-f.h>
diff --git a/arch/blackfin/mach-bf561/secondary.S b/arch/blackfin/mach-bf561/secondary.S
index 4c462838f4e1..01e5408620ac 100644
--- a/arch/blackfin/mach-bf561/secondary.S
+++ b/arch/blackfin/mach-bf561/secondary.S
@@ -23,108 +23,78 @@
#define INITIAL_STACK (COREB_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
ENTRY(_coreb_trampoline_start)
- /* Set the SYSCFG register */
- R0 = 0x36;
- SYSCFG = R0; /*Enable Cycle Counter and Nesting Of Interrupts(3rd Bit)*/
- R0 = 0;
-
- /*Clear Out All the data and pointer Registers*/
- R1 = R0;
- R2 = R0;
- R3 = R0;
- R4 = R0;
- R5 = R0;
- R6 = R0;
- R7 = R0;
-
- P0 = R0;
- P1 = R0;
- P2 = R0;
- P3 = R0;
- P4 = R0;
- P5 = R0;
-
- LC0 = r0;
- LC1 = r0;
- L0 = r0;
- L1 = r0;
- L2 = r0;
- L3 = r0;
-
- /* Clear Out All the DAG Registers*/
- B0 = r0;
- B1 = r0;
- B2 = r0;
- B3 = r0;
-
- I0 = r0;
- I1 = r0;
- I2 = r0;
- I3 = r0;
-
- M0 = r0;
- M1 = r0;
- M2 = r0;
- M3 = r0;
+ /* Enable Cycle Counter and Nesting Of Interrupts */
+#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
+ R0 = SYSCFG_SNEN;
+#else
+ R0 = SYSCFG_SNEN | SYSCFG_CCEN;
+#endif
+ SYSCFG = R0;
- trace_buffer_init(p0,r0);
+ /* Optimization register tricks: keep a base value in the
+ * reserved P registers so we use the load/store with an
+ * offset syntax. R0 = [P5 + <constant>];
+ * P5 - core MMR base
+ * R6 - 0
+ */
+ r6 = 0;
+ p5.l = 0;
+ p5.h = hi(COREMMR_BASE);
- /* Turn off the icache */
- p0.l = LO(IMEM_CONTROL);
- p0.h = HI(IMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENICPLB;
- R0 = R0 & R1;
+ /* Zero out registers required by Blackfin ABI */
- /* Disabling of CPLBs should be proceeded by a CSYNC */
+ /* Disable circular buffers */
+ L0 = r6;
+ L1 = r6;
+ L2 = r6;
+ L3 = r6;
+
+ /* Disable hardware loops in case we were started by 'go' */
+ LC0 = r6;
+ LC1 = r6;
+
+ /*
+ * Clear ITEST_COMMAND and DTEST_COMMAND registers,
+ * Leaving these as non-zero can confuse the emulator
+ */
+ [p5 + (DTEST_COMMAND - COREMMR_BASE)] = r6;
+ [p5 + (ITEST_COMMAND - COREMMR_BASE)] = r6;
CSYNC;
- [p0] = R0;
+
+ trace_buffer_init(p0,r0);
+
+ /* Turn off the icache */
+ r1 = [p5 + (IMEM_CONTROL - COREMMR_BASE)];
+ BITCLR (r1, ENICPLB_P);
+ [p5 + (IMEM_CONTROL - COREMMR_BASE)] = r1;
SSYNC;
/* Turn off the dcache */
- p0.l = LO(DMEM_CONTROL);
- p0.h = HI(DMEM_CONTROL);
- R1 = [p0];
- R0 = ~ENDCPLB;
- R0 = R0 & R1;
-
- /* Disabling of CPLBs should be proceeded by a CSYNC */
- CSYNC;
- [p0] = R0;
+ r1 = [p5 + (DMEM_CONTROL - COREMMR_BASE)];
+ BITCLR (r1, ENDCPLB_P);
+ [p5 + (DMEM_CONTROL - COREMMR_BASE)] = r1;
SSYNC;
/* in case of double faults, save a few things */
- p0.l = _init_retx_coreb;
- p0.h = _init_retx_coreb;
- R0 = RETX;
- [P0] = R0;
-
+ p1.l = _initial_pda_coreb;
+ p1.h = _initial_pda_coreb;
+ r4 = RETX;
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* Only save these if we are storing them,
* This happens here, since L1 gets clobbered
* below
*/
GET_PDA(p0, r0);
- r7 = [p0 + PDA_DF_RETX];
- p1.l = _init_saved_retx_coreb;
- p1.h = _init_saved_retx_coreb;
- [p1] = r7;
-
- r7 = [p0 + PDA_DF_DCPLB];
- p1.l = _init_saved_dcplb_fault_addr_coreb;
- p1.h = _init_saved_dcplb_fault_addr_coreb;
- [p1] = r7;
-
- r7 = [p0 + PDA_DF_ICPLB];
- p1.l = _init_saved_icplb_fault_addr_coreb;
- p1.h = _init_saved_icplb_fault_addr_coreb;
- [p1] = r7;
-
- r7 = [p0 + PDA_DF_SEQSTAT];
- p1.l = _init_saved_seqstat_coreb;
- p1.h = _init_saved_seqstat_coreb;
- [p1] = r7;
+ r0 = [p0 + PDA_DF_RETX];
+ r1 = [p0 + PDA_DF_DCPLB];
+ r2 = [p0 + PDA_DF_ICPLB];
+ r3 = [p0 + PDA_DF_SEQSTAT];
+ [p1 + PDA_INIT_DF_RETX] = r0;
+ [p1 + PDA_INIT_DF_DCPLB] = r1;
+ [p1 + PDA_INIT_DF_ICPLB] = r2;
+ [p1 + PDA_INIT_DF_SEQSTAT] = r3;
#endif
+ [p1 + PDA_INIT_RETX] = r4;
/* Initialize stack pointer */
sp.l = lo(INITIAL_STACK);
@@ -138,19 +108,13 @@ ENTRY(_coreb_trampoline_start)
/* EVT15 = _real_start */
- p0.l = lo(EVT15);
- p0.h = hi(EVT15);
p1.l = _coreb_start;
p1.h = _coreb_start;
- [p0] = p1;
+ [p5 + (EVT15 - COREMMR_BASE)] = p1;
csync;
- p0.l = lo(IMASK);
- p0.h = hi(IMASK);
- p1.l = IMASK_IVG15;
- p1.h = 0x0;
- [p0] = p1;
- csync;
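+	/* STI writes IMASK: unmask IVG15 before it is raised below */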
+ r0 = EVT_IVG15 (z);
+ sti r0;
raise 15;
p0.l = .LWAIT_HERE;
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 9cfdd49a3127..1c534d298de4 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -12,8 +12,8 @@
.section .l1.text
ENTRY(_sleep_mode)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
+ [--SP] = (R7:4, P5:3);
+ [--SP] = RETS;
call _set_sic_iwr;
@@ -46,15 +46,25 @@ ENTRY(_sleep_mode)
call _test_pll_locked;
RETS = [SP++];
- ( R7:0, P5:0 ) = [SP++];
+ (R7:4, P5:3) = [SP++];
RTS;
ENDPROC(_sleep_mode)
+/*
+ * This func never returns as it puts the part into hibernate, and
+ * is only called from do_hibernate, so we don't bother saving or
+ * restoring any of the normal C runtime state. When we wake up,
+ * the entry point will be in do_hibernate and not here.
+ *
+ * We accept just one argument -- the value to write to VR_CTL.
+ */
ENTRY(_hibernate_mode)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
+ /* Save/setup the regs we need early for minor pipeline optimization */
+ R4 = R0;
+ P3.H = hi(VR_CTL);
+ P3.L = lo(VR_CTL);
- R3 = R0;
+ /* Disable all wakeup sources */
R0 = IWR_DISABLE_ALL;
R1 = IWR_DISABLE_ALL;
R2 = IWR_DISABLE_ALL;
@@ -62,10 +72,8 @@ ENTRY(_hibernate_mode)
call _set_dram_srfs;
SSYNC;
- P0.H = hi(VR_CTL);
- P0.L = lo(VR_CTL);
-
- W[P0] = R3.L;
+ /* Finally, we climb into our cave to hibernate */
+ W[P3] = R4.L;
CLI R2;
IDLE;
.Lforever:
@@ -73,8 +81,8 @@ ENTRY(_hibernate_mode)
ENDPROC(_hibernate_mode)
ENTRY(_sleep_deeper)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
+ [--SP] = (R7:4, P5:3);
+ [--SP] = RETS;
CLI R4;
@@ -167,7 +175,7 @@ ENTRY(_sleep_deeper)
STI R4;
RETS = [SP++];
- ( R7:0, P5:0 ) = [SP++];
+ (R7:4, P5:3) = [SP++];
RTS;
ENDPROC(_sleep_deeper)
@@ -188,21 +196,20 @@ ENTRY(_set_dram_srfs)
#else /* SDRAM */
P0.L = lo(EBIU_SDGCTL);
P0.H = hi(EBIU_SDGCTL);
+ P1.L = lo(EBIU_SDSTAT);
+ P1.H = hi(EBIU_SDSTAT);
+
R2 = [P0];
BITSET(R2, 24); /* SRFS enter self-refresh mode */
[P0] = R2;
SSYNC;
- P0.L = lo(EBIU_SDSTAT);
- P0.H = hi(EBIU_SDSTAT);
1:
- R2 = w[P0];
+ R2 = w[P1];
SSYNC;
cc = BITTST(R2, 1); /* SDSRA poll self-refresh status */
if !cc jump 1b;
- P0.L = lo(EBIU_SDGCTL);
- P0.H = hi(EBIU_SDGCTL);
R2 = [P0];
BITCLR(R2, 0); /* SCTLE disable CLKOUT */
[P0] = R2;
@@ -212,6 +219,7 @@ ENDPROC(_set_dram_srfs)
ENTRY(_unset_dram_srfs)
/* set the dram out of self refresh mode */
+
#if defined(EBIU_RSTCTL) /* DDR */
P0.H = hi(EBIU_RSTCTL);
P0.L = lo(EBIU_RSTCTL);
@@ -219,42 +227,39 @@ ENTRY(_unset_dram_srfs)
BITCLR(R2, 3); /* clear SRREQ bit */
[P0] = R2;
#elif defined(EBIU_SDGCTL) /* SDRAM */
-
- P0.L = lo(EBIU_SDGCTL); /* release CLKOUT from self-refresh */
+ /* release CLKOUT from self-refresh */
+ P0.L = lo(EBIU_SDGCTL);
P0.H = hi(EBIU_SDGCTL);
+
R2 = [P0];
BITSET(R2, 0); /* SCTLE enable CLKOUT */
[P0] = R2
SSYNC;
- P0.L = lo(EBIU_SDGCTL); /* release SDRAM from self-refresh */
- P0.H = hi(EBIU_SDGCTL);
+ /* release SDRAM from self-refresh */
R2 = [P0];
BITCLR(R2, 24); /* clear SRFS bit */
[P0] = R2
#endif
+
SSYNC;
RTS;
ENDPROC(_unset_dram_srfs)
ENTRY(_set_sic_iwr)
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) || \
- defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
- P0.H = hi(SIC_IWR0);
- P0.L = lo(SIC_IWR0);
- P1.H = hi(SIC_IWR1);
- P1.L = lo(SIC_IWR1);
- [P1] = R1;
-#if defined(CONFIG_BF54x)
- P1.H = hi(SIC_IWR2);
- P1.L = lo(SIC_IWR2);
- [P1] = R2;
-#endif
+#ifdef SIC_IWR0
+ P0.H = hi(SYSMMR_BASE);
+ P0.L = lo(SYSMMR_BASE);
+ [P0 + (SIC_IWR0 - SYSMMR_BASE)] = R0;
+ [P0 + (SIC_IWR1 - SYSMMR_BASE)] = R1;
+# ifdef SIC_IWR2
+ [P0 + (SIC_IWR2 - SYSMMR_BASE)] = R2;
+# endif
#else
P0.H = hi(SIC_IWR);
P0.L = lo(SIC_IWR);
-#endif
[P0] = R0;
+#endif
SSYNC;
RTS;
@@ -272,206 +277,55 @@ ENDPROC(_test_pll_locked)
.section .text
-ENTRY(_do_hibernate)
- [--SP] = ( R7:0, P5:0 );
- [--SP] = RETS;
- /* Save System MMRs */
- R2 = R0;
- P0.H = hi(PLL_CTL);
- P0.L = lo(PLL_CTL);
-
-#ifdef SIC_IMASK0
- PM_SYS_PUSH(SIC_IMASK0)
-#endif
-#ifdef SIC_IMASK1
- PM_SYS_PUSH(SIC_IMASK1)
-#endif
-#ifdef SIC_IMASK2
- PM_SYS_PUSH(SIC_IMASK2)
-#endif
-#ifdef SIC_IMASK
- PM_SYS_PUSH(SIC_IMASK)
-#endif
-#ifdef SIC_IAR0
- PM_SYS_PUSH(SIC_IAR0)
- PM_SYS_PUSH(SIC_IAR1)
- PM_SYS_PUSH(SIC_IAR2)
-#endif
-#ifdef SIC_IAR3
- PM_SYS_PUSH(SIC_IAR3)
-#endif
-#ifdef SIC_IAR4
- PM_SYS_PUSH(SIC_IAR4)
- PM_SYS_PUSH(SIC_IAR5)
- PM_SYS_PUSH(SIC_IAR6)
-#endif
-#ifdef SIC_IAR7
- PM_SYS_PUSH(SIC_IAR7)
-#endif
-#ifdef SIC_IAR8
- PM_SYS_PUSH(SIC_IAR8)
- PM_SYS_PUSH(SIC_IAR9)
- PM_SYS_PUSH(SIC_IAR10)
- PM_SYS_PUSH(SIC_IAR11)
-#endif
+#define PM_REG0 R7
+#define PM_REG1 R6
+#define PM_REG2 R5
+#define PM_REG3 R4
+#define PM_REG4 R3
+#define PM_REG5 R2
+#define PM_REG6 R1
+#define PM_REG7 R0
+#define PM_REG8 P5
+#define PM_REG9 P4
+#define PM_REG10 P3
+#define PM_REG11 P2
+#define PM_REG12 P1
+#define PM_REG13 P0
+
+#define PM_REGSET0 R7:7
+#define PM_REGSET1 R7:6
+#define PM_REGSET2 R7:5
+#define PM_REGSET3 R7:4
+#define PM_REGSET4 R7:3
+#define PM_REGSET5 R7:2
+#define PM_REGSET6 R7:1
+#define PM_REGSET7 R7:0
+#define PM_REGSET8 R7:0, P5:5
+#define PM_REGSET9 R7:0, P5:4
+#define PM_REGSET10 R7:0, P5:3
+#define PM_REGSET11 R7:0, P5:2
+#define PM_REGSET12 R7:0, P5:1
+#define PM_REGSET13 R7:0, P5:0
+
+#define _PM_PUSH(n, x, w, base) PM_REG##n = w[FP + ((x) - (base))];
+#define _PM_POP(n, x, w, base) w[FP + ((x) - (base))] = PM_REG##n;
+#define PM_PUSH_SYNC(n) [--sp] = (PM_REGSET##n);
+#define PM_POP_SYNC(n) (PM_REGSET##n) = [sp++];
+#define PM_PUSH(n, x) PM_REG##n = [FP++];
+#define PM_POP(n, x) [FP--] = PM_REG##n;
+#define PM_CORE_PUSH(n, x) _PM_PUSH(n, x, , COREMMR_BASE)
+#define PM_CORE_POP(n, x) _PM_POP(n, x, , COREMMR_BASE)
+#define PM_SYS_PUSH(n, x) _PM_PUSH(n, x, , SYSMMR_BASE)
+#define PM_SYS_POP(n, x) _PM_POP(n, x, , SYSMMR_BASE)
+#define PM_SYS_PUSH16(n, x) _PM_PUSH(n, x, w, SYSMMR_BASE)
+#define PM_SYS_POP16(n, x) _PM_POP(n, x, w, SYSMMR_BASE)
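+
+/*
+ * Summary of the helpers above: PM_REG0..PM_REG13 name the scratch
+ * registers (R7..R0, P5..P0) and PM_REGSETn the matching multi-register
+ * range.  PM_PUSH(n, x) reads MMR x into slot n through a post-incremented
+ * FP, and PM_PUSH_SYNC(n) then spills slots 0..n to the stack with one
+ * multi-register store; PM_POP_SYNC/PM_POP reverse this on resume.  The
+ * PM_SYS_*/PM_CORE_* variants instead address each MMR as an offset from
+ * the SYSMMR_BASE/COREMMR_BASE value held in FP.
+ */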
-#ifdef SIC_IWR
- PM_SYS_PUSH(SIC_IWR)
-#endif
-#ifdef SIC_IWR0
- PM_SYS_PUSH(SIC_IWR0)
-#endif
-#ifdef SIC_IWR1
- PM_SYS_PUSH(SIC_IWR1)
-#endif
-#ifdef SIC_IWR2
- PM_SYS_PUSH(SIC_IWR2)
-#endif
-
-#ifdef PINT0_ASSIGN
- PM_SYS_PUSH(PINT0_MASK_SET)
- PM_SYS_PUSH(PINT1_MASK_SET)
- PM_SYS_PUSH(PINT2_MASK_SET)
- PM_SYS_PUSH(PINT3_MASK_SET)
- PM_SYS_PUSH(PINT0_ASSIGN)
- PM_SYS_PUSH(PINT1_ASSIGN)
- PM_SYS_PUSH(PINT2_ASSIGN)
- PM_SYS_PUSH(PINT3_ASSIGN)
- PM_SYS_PUSH(PINT0_INVERT_SET)
- PM_SYS_PUSH(PINT1_INVERT_SET)
- PM_SYS_PUSH(PINT2_INVERT_SET)
- PM_SYS_PUSH(PINT3_INVERT_SET)
- PM_SYS_PUSH(PINT0_EDGE_SET)
- PM_SYS_PUSH(PINT1_EDGE_SET)
- PM_SYS_PUSH(PINT2_EDGE_SET)
- PM_SYS_PUSH(PINT3_EDGE_SET)
-#endif
-
- PM_SYS_PUSH(EBIU_AMBCTL0)
- PM_SYS_PUSH(EBIU_AMBCTL1)
- PM_SYS_PUSH16(EBIU_AMGCTL)
-
-#ifdef EBIU_FCTL
- PM_SYS_PUSH(EBIU_MBSCTL)
- PM_SYS_PUSH(EBIU_MODE)
- PM_SYS_PUSH(EBIU_FCTL)
-#endif
-
-#ifdef PORTCIO_FER
- PM_SYS_PUSH16(PORTCIO_DIR)
- PM_SYS_PUSH16(PORTCIO_INEN)
- PM_SYS_PUSH16(PORTCIO)
- PM_SYS_PUSH16(PORTCIO_FER)
- PM_SYS_PUSH16(PORTDIO_DIR)
- PM_SYS_PUSH16(PORTDIO_INEN)
- PM_SYS_PUSH16(PORTDIO)
- PM_SYS_PUSH16(PORTDIO_FER)
- PM_SYS_PUSH16(PORTEIO_DIR)
- PM_SYS_PUSH16(PORTEIO_INEN)
- PM_SYS_PUSH16(PORTEIO)
- PM_SYS_PUSH16(PORTEIO_FER)
-#endif
-
- PM_SYS_PUSH16(SYSCR)
-
- /* Save Core MMRs */
- P0.H = hi(SRAM_BASE_ADDRESS);
- P0.L = lo(SRAM_BASE_ADDRESS);
-
- PM_PUSH(DMEM_CONTROL)
- PM_PUSH(DCPLB_ADDR0)
- PM_PUSH(DCPLB_ADDR1)
- PM_PUSH(DCPLB_ADDR2)
- PM_PUSH(DCPLB_ADDR3)
- PM_PUSH(DCPLB_ADDR4)
- PM_PUSH(DCPLB_ADDR5)
- PM_PUSH(DCPLB_ADDR6)
- PM_PUSH(DCPLB_ADDR7)
- PM_PUSH(DCPLB_ADDR8)
- PM_PUSH(DCPLB_ADDR9)
- PM_PUSH(DCPLB_ADDR10)
- PM_PUSH(DCPLB_ADDR11)
- PM_PUSH(DCPLB_ADDR12)
- PM_PUSH(DCPLB_ADDR13)
- PM_PUSH(DCPLB_ADDR14)
- PM_PUSH(DCPLB_ADDR15)
- PM_PUSH(DCPLB_DATA0)
- PM_PUSH(DCPLB_DATA1)
- PM_PUSH(DCPLB_DATA2)
- PM_PUSH(DCPLB_DATA3)
- PM_PUSH(DCPLB_DATA4)
- PM_PUSH(DCPLB_DATA5)
- PM_PUSH(DCPLB_DATA6)
- PM_PUSH(DCPLB_DATA7)
- PM_PUSH(DCPLB_DATA8)
- PM_PUSH(DCPLB_DATA9)
- PM_PUSH(DCPLB_DATA10)
- PM_PUSH(DCPLB_DATA11)
- PM_PUSH(DCPLB_DATA12)
- PM_PUSH(DCPLB_DATA13)
- PM_PUSH(DCPLB_DATA14)
- PM_PUSH(DCPLB_DATA15)
- PM_PUSH(IMEM_CONTROL)
- PM_PUSH(ICPLB_ADDR0)
- PM_PUSH(ICPLB_ADDR1)
- PM_PUSH(ICPLB_ADDR2)
- PM_PUSH(ICPLB_ADDR3)
- PM_PUSH(ICPLB_ADDR4)
- PM_PUSH(ICPLB_ADDR5)
- PM_PUSH(ICPLB_ADDR6)
- PM_PUSH(ICPLB_ADDR7)
- PM_PUSH(ICPLB_ADDR8)
- PM_PUSH(ICPLB_ADDR9)
- PM_PUSH(ICPLB_ADDR10)
- PM_PUSH(ICPLB_ADDR11)
- PM_PUSH(ICPLB_ADDR12)
- PM_PUSH(ICPLB_ADDR13)
- PM_PUSH(ICPLB_ADDR14)
- PM_PUSH(ICPLB_ADDR15)
- PM_PUSH(ICPLB_DATA0)
- PM_PUSH(ICPLB_DATA1)
- PM_PUSH(ICPLB_DATA2)
- PM_PUSH(ICPLB_DATA3)
- PM_PUSH(ICPLB_DATA4)
- PM_PUSH(ICPLB_DATA5)
- PM_PUSH(ICPLB_DATA6)
- PM_PUSH(ICPLB_DATA7)
- PM_PUSH(ICPLB_DATA8)
- PM_PUSH(ICPLB_DATA9)
- PM_PUSH(ICPLB_DATA10)
- PM_PUSH(ICPLB_DATA11)
- PM_PUSH(ICPLB_DATA12)
- PM_PUSH(ICPLB_DATA13)
- PM_PUSH(ICPLB_DATA14)
- PM_PUSH(ICPLB_DATA15)
- PM_PUSH(EVT0)
- PM_PUSH(EVT1)
- PM_PUSH(EVT2)
- PM_PUSH(EVT3)
- PM_PUSH(EVT4)
- PM_PUSH(EVT5)
- PM_PUSH(EVT6)
- PM_PUSH(EVT7)
- PM_PUSH(EVT8)
- PM_PUSH(EVT9)
- PM_PUSH(EVT10)
- PM_PUSH(EVT11)
- PM_PUSH(EVT12)
- PM_PUSH(EVT13)
- PM_PUSH(EVT14)
- PM_PUSH(EVT15)
- PM_PUSH(IMASK)
- PM_PUSH(ILAT)
- PM_PUSH(IPRIO)
- PM_PUSH(TCNTL)
- PM_PUSH(TPERIOD)
- PM_PUSH(TSCALE)
- PM_PUSH(TCOUNT)
- PM_PUSH(TBUFCTL)
-
- /* Save Core Registers */
- [--sp] = SYSCFG;
- [--sp] = ( R7:0, P5:0 );
+ENTRY(_do_hibernate)
+ /*
+ * Save the core regs early so we can blow them away when
+ * saving/restoring MMR states
+ */
+ [--sp] = (R7:0, P5:0);
[--sp] = fp;
[--sp] = usp;
@@ -506,47 +360,497 @@ ENTRY(_do_hibernate)
[--sp] = LB0;
[--sp] = LB1;
+ /* We can't push RETI directly as that'll change IPEND[4] */
+ r7 = RETI;
+ [--sp] = RETS;
[--sp] = ASTAT;
[--sp] = CYCLES;
[--sp] = CYCLES2;
-
- [--sp] = RETS;
- r0 = RETI;
- [--sp] = r0;
+ [--sp] = SYSCFG;
[--sp] = RETX;
- [--sp] = RETN;
- [--sp] = RETE;
[--sp] = SEQSTAT;
+ [--sp] = r7;
+
+ /* Save first func arg in M3 */
+ M3 = R0;
+
+ /* Save system MMRs */
+ FP.H = hi(SYSMMR_BASE);
+ FP.L = lo(SYSMMR_BASE);
+
+#ifdef SIC_IMASK0
+ PM_SYS_PUSH(0, SIC_IMASK0)
+ PM_SYS_PUSH(1, SIC_IMASK1)
+# ifdef SIC_IMASK2
+ PM_SYS_PUSH(2, SIC_IMASK2)
+# endif
+#else
+ PM_SYS_PUSH(0, SIC_IMASK)
+#endif
+#ifdef SIC_IAR0
+ PM_SYS_PUSH(3, SIC_IAR0)
+ PM_SYS_PUSH(4, SIC_IAR1)
+ PM_SYS_PUSH(5, SIC_IAR2)
+#endif
+#ifdef SIC_IAR3
+ PM_SYS_PUSH(6, SIC_IAR3)
+#endif
+#ifdef SIC_IAR4
+ PM_SYS_PUSH(7, SIC_IAR4)
+ PM_SYS_PUSH(8, SIC_IAR5)
+ PM_SYS_PUSH(9, SIC_IAR6)
+#endif
+#ifdef SIC_IAR7
+ PM_SYS_PUSH(10, SIC_IAR7)
+#endif
+#ifdef SIC_IAR8
+ PM_SYS_PUSH(11, SIC_IAR8)
+ PM_SYS_PUSH(12, SIC_IAR9)
+ PM_SYS_PUSH(13, SIC_IAR10)
+#endif
+ PM_PUSH_SYNC(13)
+#ifdef SIC_IAR11
+ PM_SYS_PUSH(0, SIC_IAR11)
+#endif
+
+#ifdef SIC_IWR
+ PM_SYS_PUSH(1, SIC_IWR)
+#endif
+#ifdef SIC_IWR0
+ PM_SYS_PUSH(1, SIC_IWR0)
+#endif
+#ifdef SIC_IWR1
+ PM_SYS_PUSH(2, SIC_IWR1)
+#endif
+#ifdef SIC_IWR2
+ PM_SYS_PUSH(3, SIC_IWR2)
+#endif
+
+#ifdef PINT0_ASSIGN
+ PM_SYS_PUSH(4, PINT0_MASK_SET)
+ PM_SYS_PUSH(5, PINT1_MASK_SET)
+ PM_SYS_PUSH(6, PINT2_MASK_SET)
+ PM_SYS_PUSH(7, PINT3_MASK_SET)
+ PM_SYS_PUSH(8, PINT0_ASSIGN)
+ PM_SYS_PUSH(9, PINT1_ASSIGN)
+ PM_SYS_PUSH(10, PINT2_ASSIGN)
+ PM_SYS_PUSH(11, PINT3_ASSIGN)
+ PM_SYS_PUSH(12, PINT0_INVERT_SET)
+ PM_SYS_PUSH(13, PINT1_INVERT_SET)
+ PM_PUSH_SYNC(13)
+ PM_SYS_PUSH(0, PINT2_INVERT_SET)
+ PM_SYS_PUSH(1, PINT3_INVERT_SET)
+ PM_SYS_PUSH(2, PINT0_EDGE_SET)
+ PM_SYS_PUSH(3, PINT1_EDGE_SET)
+ PM_SYS_PUSH(4, PINT2_EDGE_SET)
+ PM_SYS_PUSH(5, PINT3_EDGE_SET)
+#endif
+
+ PM_SYS_PUSH16(6, SYSCR)
+
+ PM_SYS_PUSH16(7, EBIU_AMGCTL)
+ PM_SYS_PUSH(8, EBIU_AMBCTL0)
+ PM_SYS_PUSH(9, EBIU_AMBCTL1)
+#ifdef EBIU_FCTL
+ PM_SYS_PUSH(10, EBIU_MBSCTL)
+ PM_SYS_PUSH(11, EBIU_MODE)
+ PM_SYS_PUSH(12, EBIU_FCTL)
+ PM_PUSH_SYNC(12)
+#else
+ PM_PUSH_SYNC(9)
+#endif
+
+ /* Save Core MMRs */
+ I0.H = hi(COREMMR_BASE);
+ I0.L = lo(COREMMR_BASE);
+ I1 = I0;
+ I2 = I0;
+ I3 = I0;
+ B0 = I0;
+ B1 = I0;
+ B2 = I0;
+ B3 = I0;
+ I1.L = lo(DCPLB_ADDR0);
+ I2.L = lo(DCPLB_DATA0);
+ I3.L = lo(ICPLB_ADDR0);
+ B0.L = lo(ICPLB_DATA0);
+ B1.L = lo(EVT2);
+ B2.L = lo(IMASK);
+ B3.L = lo(TCNTL);
+
+ /* DCPLB Addr */
+ FP = I1;
+ PM_PUSH(0, DCPLB_ADDR0)
+ PM_PUSH(1, DCPLB_ADDR1)
+ PM_PUSH(2, DCPLB_ADDR2)
+ PM_PUSH(3, DCPLB_ADDR3)
+ PM_PUSH(4, DCPLB_ADDR4)
+ PM_PUSH(5, DCPLB_ADDR5)
+ PM_PUSH(6, DCPLB_ADDR6)
+ PM_PUSH(7, DCPLB_ADDR7)
+ PM_PUSH(8, DCPLB_ADDR8)
+ PM_PUSH(9, DCPLB_ADDR9)
+ PM_PUSH(10, DCPLB_ADDR10)
+ PM_PUSH(11, DCPLB_ADDR11)
+ PM_PUSH(12, DCPLB_ADDR12)
+ PM_PUSH(13, DCPLB_ADDR13)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, DCPLB_ADDR14)
+ PM_PUSH(1, DCPLB_ADDR15)
+
+ /* DCPLB Data */
+ FP = I2;
+ PM_PUSH(2, DCPLB_DATA0)
+ PM_PUSH(3, DCPLB_DATA1)
+ PM_PUSH(4, DCPLB_DATA2)
+ PM_PUSH(5, DCPLB_DATA3)
+ PM_PUSH(6, DCPLB_DATA4)
+ PM_PUSH(7, DCPLB_DATA5)
+ PM_PUSH(8, DCPLB_DATA6)
+ PM_PUSH(9, DCPLB_DATA7)
+ PM_PUSH(10, DCPLB_DATA8)
+ PM_PUSH(11, DCPLB_DATA9)
+ PM_PUSH(12, DCPLB_DATA10)
+ PM_PUSH(13, DCPLB_DATA11)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, DCPLB_DATA12)
+ PM_PUSH(1, DCPLB_DATA13)
+ PM_PUSH(2, DCPLB_DATA14)
+ PM_PUSH(3, DCPLB_DATA15)
+
+ /* ICPLB Addr */
+ FP = I3;
+ PM_PUSH(4, ICPLB_ADDR0)
+ PM_PUSH(5, ICPLB_ADDR1)
+ PM_PUSH(6, ICPLB_ADDR2)
+ PM_PUSH(7, ICPLB_ADDR3)
+ PM_PUSH(8, ICPLB_ADDR4)
+ PM_PUSH(9, ICPLB_ADDR5)
+ PM_PUSH(10, ICPLB_ADDR6)
+ PM_PUSH(11, ICPLB_ADDR7)
+ PM_PUSH(12, ICPLB_ADDR8)
+ PM_PUSH(13, ICPLB_ADDR9)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, ICPLB_ADDR10)
+ PM_PUSH(1, ICPLB_ADDR11)
+ PM_PUSH(2, ICPLB_ADDR12)
+ PM_PUSH(3, ICPLB_ADDR13)
+ PM_PUSH(4, ICPLB_ADDR14)
+ PM_PUSH(5, ICPLB_ADDR15)
+
+ /* ICPLB Data */
+ FP = B0;
+ PM_PUSH(6, ICPLB_DATA0)
+ PM_PUSH(7, ICPLB_DATA1)
+ PM_PUSH(8, ICPLB_DATA2)
+ PM_PUSH(9, ICPLB_DATA3)
+ PM_PUSH(10, ICPLB_DATA4)
+ PM_PUSH(11, ICPLB_DATA5)
+ PM_PUSH(12, ICPLB_DATA6)
+ PM_PUSH(13, ICPLB_DATA7)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, ICPLB_DATA8)
+ PM_PUSH(1, ICPLB_DATA9)
+ PM_PUSH(2, ICPLB_DATA10)
+ PM_PUSH(3, ICPLB_DATA11)
+ PM_PUSH(4, ICPLB_DATA12)
+ PM_PUSH(5, ICPLB_DATA13)
+ PM_PUSH(6, ICPLB_DATA14)
+ PM_PUSH(7, ICPLB_DATA15)
+
+ /* Event Vectors */
+ FP = B1;
+ PM_PUSH(8, EVT2)
+ PM_PUSH(9, EVT3)
+ FP += 4; /* EVT4 */
+ PM_PUSH(10, EVT5)
+ PM_PUSH(11, EVT6)
+ PM_PUSH(12, EVT7)
+ PM_PUSH(13, EVT8)
+ PM_PUSH_SYNC(13)
+ PM_PUSH(0, EVT9)
+ PM_PUSH(1, EVT10)
+ PM_PUSH(2, EVT11)
+ PM_PUSH(3, EVT12)
+ PM_PUSH(4, EVT13)
+ PM_PUSH(5, EVT14)
+ PM_PUSH(6, EVT15)
+
+ /* CEC */
+ FP = B2;
+ PM_PUSH(7, IMASK)
+ FP += 4; /* IPEND */
+ PM_PUSH(8, ILAT)
+ PM_PUSH(9, IPRIO)
+
+ /* Core Timer */
+ FP = B3;
+ PM_PUSH(10, TCNTL)
+ PM_PUSH(11, TPERIOD)
+ PM_PUSH(12, TSCALE)
+ PM_PUSH(13, TCOUNT)
+ PM_PUSH_SYNC(13)
+
+ /* Misc non-contiguous registers */
+ FP = I0;
+ PM_CORE_PUSH(0, DMEM_CONTROL);
+ PM_CORE_PUSH(1, IMEM_CONTROL);
+ PM_CORE_PUSH(2, TBUFCTL);
+ PM_PUSH_SYNC(2)
+
+ /* Setup args to hibernate mode early for pipeline optimization */
+ R0 = M3;
+ P1.H = _hibernate_mode;
+ P1.L = _hibernate_mode;
/* Save Magic, return address and Stack Pointer */
- P0.H = 0;
- P0.L = 0;
- R0.H = 0xDEAD; /* Hibernate Magic */
- R0.L = 0xBEEF;
- [P0++] = R0; /* Store Hibernate Magic */
- R0.H = .Lpm_resume_here;
- R0.L = .Lpm_resume_here;
- [P0++] = R0; /* Save Return Address */
+ P0 = 0;
+ R1.H = 0xDEAD; /* Hibernate Magic */
+ R1.L = 0xBEEF;
+ R2.H = .Lpm_resume_here;
+ R2.L = .Lpm_resume_here;
+ [P0++] = R1; /* Store Hibernate Magic */
+ [P0++] = R2; /* Save Return Address */
[P0++] = SP; /* Save Stack Pointer */
- P0.H = _hibernate_mode;
- P0.L = _hibernate_mode;
- R0 = R2;
- call (P0); /* Goodbye */
+
+ /* Must use an indirect call as we need to jump to L1 */
+ call (P1); /* Goodbye */
.Lpm_resume_here:
+ /* Restore Core MMRs */
+ I0.H = hi(COREMMR_BASE);
+ I0.L = lo(COREMMR_BASE);
+ I1 = I0;
+ I2 = I0;
+ I3 = I0;
+ B0 = I0;
+ B1 = I0;
+ B2 = I0;
+ B3 = I0;
+ I1.L = lo(DCPLB_ADDR15);
+ I2.L = lo(DCPLB_DATA15);
+ I3.L = lo(ICPLB_ADDR15);
+ B0.L = lo(ICPLB_DATA15);
+ B1.L = lo(EVT15);
+ B2.L = lo(IPRIO);
+ B3.L = lo(TCOUNT);
+
+ /* Misc non-contiguous registers */
+ FP = I0;
+ PM_POP_SYNC(2)
+ PM_CORE_POP(2, TBUFCTL)
+ PM_CORE_POP(1, IMEM_CONTROL)
+ PM_CORE_POP(0, DMEM_CONTROL)
+
+ /* Core Timer */
+ PM_POP_SYNC(13)
+ FP = B3;
+ PM_POP(13, TCOUNT)
+ PM_POP(12, TSCALE)
+ PM_POP(11, TPERIOD)
+ PM_POP(10, TCNTL)
+
+ /* CEC */
+ FP = B2;
+ PM_POP(9, IPRIO)
+ PM_POP(8, ILAT)
+ FP += -4; /* IPEND */
+ PM_POP(7, IMASK)
+
+ /* Event Vectors */
+ FP = B1;
+ PM_POP(6, EVT15)
+ PM_POP(5, EVT14)
+ PM_POP(4, EVT13)
+ PM_POP(3, EVT12)
+ PM_POP(2, EVT11)
+ PM_POP(1, EVT10)
+ PM_POP(0, EVT9)
+ PM_POP_SYNC(13)
+ PM_POP(13, EVT8)
+ PM_POP(12, EVT7)
+ PM_POP(11, EVT6)
+ PM_POP(10, EVT5)
+ FP += -4; /* EVT4 */
+ PM_POP(9, EVT3)
+ PM_POP(8, EVT2)
+
+ /* ICPLB Data */
+ FP = B0;
+ PM_POP(7, ICPLB_DATA15)
+ PM_POP(6, ICPLB_DATA14)
+ PM_POP(5, ICPLB_DATA13)
+ PM_POP(4, ICPLB_DATA12)
+ PM_POP(3, ICPLB_DATA11)
+ PM_POP(2, ICPLB_DATA10)
+ PM_POP(1, ICPLB_DATA9)
+ PM_POP(0, ICPLB_DATA8)
+ PM_POP_SYNC(13)
+ PM_POP(13, ICPLB_DATA7)
+ PM_POP(12, ICPLB_DATA6)
+ PM_POP(11, ICPLB_DATA5)
+ PM_POP(10, ICPLB_DATA4)
+ PM_POP(9, ICPLB_DATA3)
+ PM_POP(8, ICPLB_DATA2)
+ PM_POP(7, ICPLB_DATA1)
+ PM_POP(6, ICPLB_DATA0)
+
+ /* ICPLB Addr */
+ FP = I3;
+ PM_POP(5, ICPLB_ADDR15)
+ PM_POP(4, ICPLB_ADDR14)
+ PM_POP(3, ICPLB_ADDR13)
+ PM_POP(2, ICPLB_ADDR12)
+ PM_POP(1, ICPLB_ADDR11)
+ PM_POP(0, ICPLB_ADDR10)
+ PM_POP_SYNC(13)
+ PM_POP(13, ICPLB_ADDR9)
+ PM_POP(12, ICPLB_ADDR8)
+ PM_POP(11, ICPLB_ADDR7)
+ PM_POP(10, ICPLB_ADDR6)
+ PM_POP(9, ICPLB_ADDR5)
+ PM_POP(8, ICPLB_ADDR4)
+ PM_POP(7, ICPLB_ADDR3)
+ PM_POP(6, ICPLB_ADDR2)
+ PM_POP(5, ICPLB_ADDR1)
+ PM_POP(4, ICPLB_ADDR0)
+
+ /* DCPLB Data */
+ FP = I2;
+ PM_POP(3, DCPLB_DATA15)
+ PM_POP(2, DCPLB_DATA14)
+ PM_POP(1, DCPLB_DATA13)
+ PM_POP(0, DCPLB_DATA12)
+ PM_POP_SYNC(13)
+ PM_POP(13, DCPLB_DATA11)
+ PM_POP(12, DCPLB_DATA10)
+ PM_POP(11, DCPLB_DATA9)
+ PM_POP(10, DCPLB_DATA8)
+ PM_POP(9, DCPLB_DATA7)
+ PM_POP(8, DCPLB_DATA6)
+ PM_POP(7, DCPLB_DATA5)
+ PM_POP(6, DCPLB_DATA4)
+ PM_POP(5, DCPLB_DATA3)
+ PM_POP(4, DCPLB_DATA2)
+ PM_POP(3, DCPLB_DATA1)
+ PM_POP(2, DCPLB_DATA0)
+
+ /* DCPLB Addr */
+ FP = I1;
+ PM_POP(1, DCPLB_ADDR15)
+ PM_POP(0, DCPLB_ADDR14)
+ PM_POP_SYNC(13)
+ PM_POP(13, DCPLB_ADDR13)
+ PM_POP(12, DCPLB_ADDR12)
+ PM_POP(11, DCPLB_ADDR11)
+ PM_POP(10, DCPLB_ADDR10)
+ PM_POP(9, DCPLB_ADDR9)
+ PM_POP(8, DCPLB_ADDR8)
+ PM_POP(7, DCPLB_ADDR7)
+ PM_POP(6, DCPLB_ADDR6)
+ PM_POP(5, DCPLB_ADDR5)
+ PM_POP(4, DCPLB_ADDR4)
+ PM_POP(3, DCPLB_ADDR3)
+ PM_POP(2, DCPLB_ADDR2)
+ PM_POP(1, DCPLB_ADDR1)
+ PM_POP(0, DCPLB_ADDR0)
+
+ /* Restore System MMRs */
+ FP.H = hi(SYSMMR_BASE);
+ FP.L = lo(SYSMMR_BASE);
+
+#ifdef EBIU_FCTL
+ PM_POP_SYNC(12)
+ PM_SYS_POP(12, EBIU_FCTL)
+ PM_SYS_POP(11, EBIU_MODE)
+ PM_SYS_POP(10, EBIU_MBSCTL)
+#else
+ PM_POP_SYNC(9)
+#endif
+ PM_SYS_POP(9, EBIU_AMBCTL1)
+ PM_SYS_POP(8, EBIU_AMBCTL0)
+ PM_SYS_POP16(7, EBIU_AMGCTL)
+
+ PM_SYS_POP16(6, SYSCR)
+
+#ifdef PINT0_ASSIGN
+ PM_SYS_POP(5, PINT3_EDGE_SET)
+ PM_SYS_POP(4, PINT2_EDGE_SET)
+ PM_SYS_POP(3, PINT1_EDGE_SET)
+ PM_SYS_POP(2, PINT0_EDGE_SET)
+ PM_SYS_POP(1, PINT3_INVERT_SET)
+ PM_SYS_POP(0, PINT2_INVERT_SET)
+ PM_POP_SYNC(13)
+ PM_SYS_POP(13, PINT1_INVERT_SET)
+ PM_SYS_POP(12, PINT0_INVERT_SET)
+ PM_SYS_POP(11, PINT3_ASSIGN)
+ PM_SYS_POP(10, PINT2_ASSIGN)
+ PM_SYS_POP(9, PINT1_ASSIGN)
+ PM_SYS_POP(8, PINT0_ASSIGN)
+ PM_SYS_POP(7, PINT3_MASK_SET)
+ PM_SYS_POP(6, PINT2_MASK_SET)
+ PM_SYS_POP(5, PINT1_MASK_SET)
+ PM_SYS_POP(4, PINT0_MASK_SET)
+#endif
+
+#ifdef SIC_IWR2
+ PM_SYS_POP(3, SIC_IWR2)
+#endif
+#ifdef SIC_IWR1
+ PM_SYS_POP(2, SIC_IWR1)
+#endif
+#ifdef SIC_IWR0
+ PM_SYS_POP(1, SIC_IWR0)
+#endif
+#ifdef SIC_IWR
+ PM_SYS_POP(1, SIC_IWR)
+#endif
+
+#ifdef SIC_IAR11
+ PM_SYS_POP(0, SIC_IAR11)
+#endif
+ PM_POP_SYNC(13)
+#ifdef SIC_IAR8
+ PM_SYS_POP(13, SIC_IAR10)
+ PM_SYS_POP(12, SIC_IAR9)
+ PM_SYS_POP(11, SIC_IAR8)
+#endif
+#ifdef SIC_IAR7
+ PM_SYS_POP(10, SIC_IAR7)
+#endif
+#ifdef SIC_IAR6
+ PM_SYS_POP(9, SIC_IAR6)
+ PM_SYS_POP(8, SIC_IAR5)
+ PM_SYS_POP(7, SIC_IAR4)
+#endif
+#ifdef SIC_IAR3
+ PM_SYS_POP(6, SIC_IAR3)
+#endif
+#ifdef SIC_IAR0
+ PM_SYS_POP(5, SIC_IAR2)
+ PM_SYS_POP(4, SIC_IAR1)
+ PM_SYS_POP(3, SIC_IAR0)
+#endif
+#ifdef SIC_IMASK0
+# ifdef SIC_IMASK2
+ PM_SYS_POP(2, SIC_IMASK2)
+# endif
+ PM_SYS_POP(1, SIC_IMASK1)
+ PM_SYS_POP(0, SIC_IMASK0)
+#else
+ PM_SYS_POP(0, SIC_IMASK)
+#endif
+
/* Restore Core Registers */
+ RETI = [sp++];
SEQSTAT = [sp++];
- RETE = [sp++];
- RETN = [sp++];
RETX = [sp++];
- r0 = [sp++];
- RETI = r0;
- RETS = [sp++];
-
+ SYSCFG = [sp++];
CYCLES2 = [sp++];
CYCLES = [sp++];
ASTAT = [sp++];
+ RETS = [sp++];
LB1 = [sp++];
LB0 = [sp++];
@@ -581,204 +885,10 @@ ENTRY(_do_hibernate)
usp = [sp++];
fp = [sp++];
-
- ( R7 : 0, P5 : 0) = [ SP ++ ];
- SYSCFG = [sp++];
-
- /* Restore Core MMRs */
-
- PM_POP(TBUFCTL)
- PM_POP(TCOUNT)
- PM_POP(TSCALE)
- PM_POP(TPERIOD)
- PM_POP(TCNTL)
- PM_POP(IPRIO)
- PM_POP(ILAT)
- PM_POP(IMASK)
- PM_POP(EVT15)
- PM_POP(EVT14)
- PM_POP(EVT13)
- PM_POP(EVT12)
- PM_POP(EVT11)
- PM_POP(EVT10)
- PM_POP(EVT9)
- PM_POP(EVT8)
- PM_POP(EVT7)
- PM_POP(EVT6)
- PM_POP(EVT5)
- PM_POP(EVT4)
- PM_POP(EVT3)
- PM_POP(EVT2)
- PM_POP(EVT1)
- PM_POP(EVT0)
- PM_POP(ICPLB_DATA15)
- PM_POP(ICPLB_DATA14)
- PM_POP(ICPLB_DATA13)
- PM_POP(ICPLB_DATA12)
- PM_POP(ICPLB_DATA11)
- PM_POP(ICPLB_DATA10)
- PM_POP(ICPLB_DATA9)
- PM_POP(ICPLB_DATA8)
- PM_POP(ICPLB_DATA7)
- PM_POP(ICPLB_DATA6)
- PM_POP(ICPLB_DATA5)
- PM_POP(ICPLB_DATA4)
- PM_POP(ICPLB_DATA3)
- PM_POP(ICPLB_DATA2)
- PM_POP(ICPLB_DATA1)
- PM_POP(ICPLB_DATA0)
- PM_POP(ICPLB_ADDR15)
- PM_POP(ICPLB_ADDR14)
- PM_POP(ICPLB_ADDR13)
- PM_POP(ICPLB_ADDR12)
- PM_POP(ICPLB_ADDR11)
- PM_POP(ICPLB_ADDR10)
- PM_POP(ICPLB_ADDR9)
- PM_POP(ICPLB_ADDR8)
- PM_POP(ICPLB_ADDR7)
- PM_POP(ICPLB_ADDR6)
- PM_POP(ICPLB_ADDR5)
- PM_POP(ICPLB_ADDR4)
- PM_POP(ICPLB_ADDR3)
- PM_POP(ICPLB_ADDR2)
- PM_POP(ICPLB_ADDR1)
- PM_POP(ICPLB_ADDR0)
- PM_POP(IMEM_CONTROL)
- PM_POP(DCPLB_DATA15)
- PM_POP(DCPLB_DATA14)
- PM_POP(DCPLB_DATA13)
- PM_POP(DCPLB_DATA12)
- PM_POP(DCPLB_DATA11)
- PM_POP(DCPLB_DATA10)
- PM_POP(DCPLB_DATA9)
- PM_POP(DCPLB_DATA8)
- PM_POP(DCPLB_DATA7)
- PM_POP(DCPLB_DATA6)
- PM_POP(DCPLB_DATA5)
- PM_POP(DCPLB_DATA4)
- PM_POP(DCPLB_DATA3)
- PM_POP(DCPLB_DATA2)
- PM_POP(DCPLB_DATA1)
- PM_POP(DCPLB_DATA0)
- PM_POP(DCPLB_ADDR15)
- PM_POP(DCPLB_ADDR14)
- PM_POP(DCPLB_ADDR13)
- PM_POP(DCPLB_ADDR12)
- PM_POP(DCPLB_ADDR11)
- PM_POP(DCPLB_ADDR10)
- PM_POP(DCPLB_ADDR9)
- PM_POP(DCPLB_ADDR8)
- PM_POP(DCPLB_ADDR7)
- PM_POP(DCPLB_ADDR6)
- PM_POP(DCPLB_ADDR5)
- PM_POP(DCPLB_ADDR4)
- PM_POP(DCPLB_ADDR3)
- PM_POP(DCPLB_ADDR2)
- PM_POP(DCPLB_ADDR1)
- PM_POP(DCPLB_ADDR0)
- PM_POP(DMEM_CONTROL)
-
- /* Restore System MMRs */
-
- P0.H = hi(PLL_CTL);
- P0.L = lo(PLL_CTL);
- PM_SYS_POP16(SYSCR)
-
-#ifdef PORTCIO_FER
- PM_SYS_POP16(PORTEIO_FER)
- PM_SYS_POP16(PORTEIO)
- PM_SYS_POP16(PORTEIO_INEN)
- PM_SYS_POP16(PORTEIO_DIR)
- PM_SYS_POP16(PORTDIO_FER)
- PM_SYS_POP16(PORTDIO)
- PM_SYS_POP16(PORTDIO_INEN)
- PM_SYS_POP16(PORTDIO_DIR)
- PM_SYS_POP16(PORTCIO_FER)
- PM_SYS_POP16(PORTCIO)
- PM_SYS_POP16(PORTCIO_INEN)
- PM_SYS_POP16(PORTCIO_DIR)
-#endif
-
-#ifdef EBIU_FCTL
- PM_SYS_POP(EBIU_FCTL)
- PM_SYS_POP(EBIU_MODE)
- PM_SYS_POP(EBIU_MBSCTL)
-#endif
- PM_SYS_POP16(EBIU_AMGCTL)
- PM_SYS_POP(EBIU_AMBCTL1)
- PM_SYS_POP(EBIU_AMBCTL0)
-
-#ifdef PINT0_ASSIGN
- PM_SYS_POP(PINT3_EDGE_SET)
- PM_SYS_POP(PINT2_EDGE_SET)
- PM_SYS_POP(PINT1_EDGE_SET)
- PM_SYS_POP(PINT0_EDGE_SET)
- PM_SYS_POP(PINT3_INVERT_SET)
- PM_SYS_POP(PINT2_INVERT_SET)
- PM_SYS_POP(PINT1_INVERT_SET)
- PM_SYS_POP(PINT0_INVERT_SET)
- PM_SYS_POP(PINT3_ASSIGN)
- PM_SYS_POP(PINT2_ASSIGN)
- PM_SYS_POP(PINT1_ASSIGN)
- PM_SYS_POP(PINT0_ASSIGN)
- PM_SYS_POP(PINT3_MASK_SET)
- PM_SYS_POP(PINT2_MASK_SET)
- PM_SYS_POP(PINT1_MASK_SET)
- PM_SYS_POP(PINT0_MASK_SET)
-#endif
-
-#ifdef SIC_IWR2
- PM_SYS_POP(SIC_IWR2)
-#endif
-#ifdef SIC_IWR1
- PM_SYS_POP(SIC_IWR1)
-#endif
-#ifdef SIC_IWR0
- PM_SYS_POP(SIC_IWR0)
-#endif
-#ifdef SIC_IWR
- PM_SYS_POP(SIC_IWR)
-#endif
-
-#ifdef SIC_IAR8
- PM_SYS_POP(SIC_IAR11)
- PM_SYS_POP(SIC_IAR10)
- PM_SYS_POP(SIC_IAR9)
- PM_SYS_POP(SIC_IAR8)
-#endif
-#ifdef SIC_IAR7
- PM_SYS_POP(SIC_IAR7)
-#endif
-#ifdef SIC_IAR6
- PM_SYS_POP(SIC_IAR6)
- PM_SYS_POP(SIC_IAR5)
- PM_SYS_POP(SIC_IAR4)
-#endif
-#ifdef SIC_IAR3
- PM_SYS_POP(SIC_IAR3)
-#endif
-#ifdef SIC_IAR0
- PM_SYS_POP(SIC_IAR2)
- PM_SYS_POP(SIC_IAR1)
- PM_SYS_POP(SIC_IAR0)
-#endif
-#ifdef SIC_IMASK
- PM_SYS_POP(SIC_IMASK)
-#endif
-#ifdef SIC_IMASK2
- PM_SYS_POP(SIC_IMASK2)
-#endif
-#ifdef SIC_IMASK1
- PM_SYS_POP(SIC_IMASK1)
-#endif
-#ifdef SIC_IMASK0
- PM_SYS_POP(SIC_IMASK0)
-#endif
+ (R7:0, P5:0) = [sp++];
[--sp] = RETI; /* Clear Global Interrupt Disable */
SP += 4;
- RETS = [SP++];
- ( R7:0, P5:0 ) = [SP++];
RTS;
ENDPROC(_do_hibernate)
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index 76de5724c1e3..8b4d98854403 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -85,37 +85,25 @@ ENTRY(__start)
SSYNC;
/* in case of double faults, save a few things */
- p0.l = _init_retx;
- p0.h = _init_retx;
- R0 = RETX;
- [P0] = R0;
-
+ p1.l = _initial_pda;
+ p1.h = _initial_pda;
+ r4 = RETX;
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* Only save these if we are storing them,
* This happens here, since L1 gets clobbered
* below
*/
GET_PDA(p0, r0);
- r5 = [p0 + PDA_DF_RETX];
- p1.l = _init_saved_retx;
- p1.h = _init_saved_retx;
- [p1] = r5;
-
- r5 = [p0 + PDA_DF_DCPLB];
- p1.l = _init_saved_dcplb_fault_addr;
- p1.h = _init_saved_dcplb_fault_addr;
- [p1] = r5;
-
- r5 = [p0 + PDA_DF_ICPLB];
- p1.l = _init_saved_icplb_fault_addr;
- p1.h = _init_saved_icplb_fault_addr;
- [p1] = r5;
-
- r5 = [p0 + PDA_DF_SEQSTAT];
- p1.l = _init_saved_seqstat;
- p1.h = _init_saved_seqstat;
- [p1] = r5;
+ r0 = [p0 + PDA_DF_RETX];
+ r1 = [p0 + PDA_DF_DCPLB];
+ r2 = [p0 + PDA_DF_ICPLB];
+ r3 = [p0 + PDA_DF_SEQSTAT];
+ [p1 + PDA_INIT_DF_RETX] = r0;
+ [p1 + PDA_INIT_DF_DCPLB] = r1;
+ [p1 + PDA_INIT_DF_ICPLB] = r2;
+ [p1 + PDA_INIT_DF_SEQSTAT] = r3;
#endif
+ [p1 + PDA_INIT_RETX] = r4;
/* Initialize stack pointer */
sp.l = _init_thread_union + THREAD_SIZE;
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 1177369f9922..332dace6af34 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -444,7 +444,7 @@ static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
extern void bfin_gpio_irq_prepare(unsigned gpio);
-#if !defined(CONFIG_BF54x)
+#if !BFIN_GPIO_PINT
static void bfin_gpio_ack_irq(struct irq_data *d)
{
@@ -633,7 +633,7 @@ void bfin_demux_gpio_irq(unsigned int inta_irq,
bfin_demux_gpio_block(irq);
}
-#else /* CONFIG_BF54x */
+#else
#define NR_PINT_SYS_IRQS 4
#define NR_PINT_BITS 32
@@ -647,24 +647,11 @@ void bfin_demux_gpio_irq(unsigned int inta_irq,
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
-struct pin_int_t {
- unsigned int mask_set;
- unsigned int mask_clear;
- unsigned int request;
- unsigned int assign;
- unsigned int edge_set;
- unsigned int edge_clear;
- unsigned int invert_set;
- unsigned int invert_clear;
- unsigned int pinstate;
- unsigned int latch;
-};
-
-static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
- (struct pin_int_t *)PINT0_MASK_SET,
- (struct pin_int_t *)PINT1_MASK_SET,
- (struct pin_int_t *)PINT2_MASK_SET,
- (struct pin_int_t *)PINT3_MASK_SET,
+static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
+ (struct bfin_pint_regs *)PINT0_MASK_SET,
+ (struct bfin_pint_regs *)PINT1_MASK_SET,
+ (struct bfin_pint_regs *)PINT2_MASK_SET,
+ (struct bfin_pint_regs *)PINT3_MASK_SET,
};
inline unsigned int get_irq_base(u32 bank, u8 bmap)
@@ -981,7 +968,7 @@ int __init init_arch_irq(void)
local_irq_disable();
-#ifdef CONFIG_BF54x
+#if BFIN_GPIO_PINT
# ifdef CONFIG_PINTx_REASSIGN
pint[0]->assign = CONFIG_PINT0_ASSIGN;
pint[1]->assign = CONFIG_PINT1_ASSIGN;
@@ -999,16 +986,16 @@ int __init init_arch_irq(void)
irq_set_chip(irq, &bfin_internal_irqchip);
switch (irq) {
-#if defined(BF537_FAMILY)
- case IRQ_PH_INTA_MAC_RX:
- case IRQ_PF_INTA_PG_INTA:
-#elif defined(BF533_FAMILY)
- case IRQ_PROG_INTA:
-#elif defined(CONFIG_BF54x)
+#if BFIN_GPIO_PINT
case IRQ_PINT0:
case IRQ_PINT1:
case IRQ_PINT2:
case IRQ_PINT3:
+#elif defined(BF537_FAMILY)
+ case IRQ_PH_INTA_MAC_RX:
+ case IRQ_PF_INTA_PG_INTA:
+#elif defined(BF533_FAMILY)
+ case IRQ_PROG_INTA:
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
case IRQ_PORTF_INTA:
case IRQ_PORTG_INTA:
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 35e7e1eb0188..1c143a4de5f5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -45,9 +45,7 @@ struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
#endif
-void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
- *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
- *init_saved_dcplb_fault_addr_coreb;
+struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
#define BFIN_IPI_RESCHEDULE 0
#define BFIN_IPI_CALL_FUNC 1
@@ -369,13 +367,16 @@ void __cpuinit secondary_start_kernel(void)
if (_bfin_swrst & SWRST_DBL_FAULT_B) {
printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
- printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
- (int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
- printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
- printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
+ printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
+ initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE,
+ initial_pda_coreb.retx_doublefault);
+ printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
+ initial_pda_coreb.dcplb_doublefault_addr);
+ printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
+ initial_pda_coreb.icplb_doublefault_addr);
#endif
printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
- init_retx_coreb);
+ initial_pda_coreb.retx);
}
/*
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index bcd502f74cda..37400f5869e6 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -30,45 +30,19 @@
#endif
#ifdef CONFIG_ETRAX_KMALLOCED_MODULES
-#define MALLOC_MODULE(size) kmalloc(size, GFP_KERNEL)
-#define FREE_MODULE(region) kfree(region)
-#else
-#define MALLOC_MODULE(size) vmalloc_exec(size)
-#define FREE_MODULE(region) vfree(region)
-#endif
-
void *module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
- return MALLOC_MODULE(size);
+ return kmalloc(size, GFP_KERNEL);
}
-
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
- FREE_MODULE(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
- return -ENOEXEC;
+ kfree(module_region);
}
+#endif
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
@@ -108,14 +82,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
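The arch copies deleted here (and in the module.c patches that follow) are expected to fall back to generic weak hooks; a minimal sketch of what those presumably look like in kernel/module.c, under that assumption:

/*
 * Assumption, not part of this patch: presumed shape of the generic
 * fallbacks that make the arch-specific copies redundant.
 */
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>

void * __weak module_alloc(unsigned long size)
{
	if (size == 0)
		return NULL;
	return vmalloc_exec(size);	/* assumed default allocator */
}

void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}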
diff --git a/arch/frv/kernel/module.c b/arch/frv/kernel/module.c
index 711763c8a6f3..9d9835f1fe2b 100644
--- a/arch/frv/kernel/module.c
+++ b/arch/frv/kernel/module.c
@@ -22,57 +22,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-
- return vmalloc_exec(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", me->name);
- return -ENOEXEC;
-}
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", me->name);
- return -ENOEXEC;
-}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
+/* TODO: At least one of apply_relocate or apply_relocate_add must be
+ * implemented in order to get working module support.
+ */
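A hedged sketch of the shape such an apply_relocate_add() implementation would take; the R_FRV_32 relocation name is assumed for illustration and this is not part of the patch:

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/moduleloader.h>

int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	unsigned int i;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* location to patch, in the section the relocations target */
		uint32_t *loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
				+ rela[i].r_offset;
		Elf32_Sym *sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
				+ ELF32_R_SYM(rela[i].r_info);
		uint32_t v = sym->st_value + rela[i].r_addend;

		switch (ELF32_R_TYPE(rela[i].r_info)) {
		case R_FRV_32:			/* assumed relocation name */
			*loc = v;
			break;
		default:
			printk(KERN_ERR "module %s: unknown reloc %u\n",
			       me->name, ELF32_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}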
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
index db4953dc4e1b..1d526e05db19 100644
--- a/arch/h8300/kernel/module.c
+++ b/arch/h8300/kernel/module.c
@@ -11,40 +11,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -107,14 +73,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
me->name, rela[i].r_offset);
return -ENOEXEC;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 2eb0a981a09a..32551d304cd7 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -281,6 +281,10 @@ paravirt_init_missing_ticks_accounting(int cpu)
pv_time_ops.init_missing_ticks_accounting(cpu);
}
+struct jump_label_key;
+extern struct jump_label_key paravirt_steal_enabled;
+extern struct jump_label_key paravirt_steal_rq_enabled;
+
static inline int
paravirt_do_steal_accounting(unsigned long *new_itm)
{
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 1481b0a28ca0..24603be24c14 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -304,14 +304,6 @@ plt_target (struct plt_entry *plt)
#endif /* !USE_BRL */
-void *
-module_alloc (unsigned long size)
-{
- if (!size)
- return NULL;
- return vmalloc(size);
-}
-
void
module_free (struct module *mod, void *module_region)
{
@@ -853,14 +845,6 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
return 0;
}
-int
-apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- unsigned int relsec, struct module *mod)
-{
- printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
- return -ENOEXEC;
-}
-
/*
* Modules contain a single unwind table which covers both the core and the init text
* sections but since the two are not contiguous, we need to split this table up such that
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index a21d7bb9c69c..100868216c55 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -634,6 +634,8 @@ struct pv_irq_ops pv_irq_ops = {
* pv_time_ops
* time operations
*/
+struct jump_label_key paravirt_steal_enabled;
+struct jump_label_key paravirt_steal_rq_enabled;
static int
ia64_native_do_steal_accounting(unsigned long *new_itm)
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index fa4d1e59deb0..9806e55f91be 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -49,6 +49,5 @@ config KVM_INTEL
extensions.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/m32r/include/asm/delay.h b/arch/m32r/include/asm/delay.h
index 9dd9e999ea69..9670e127b7b2 100644
--- a/arch/m32r/include/asm/delay.h
+++ b/arch/m32r/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef _ASM_M32R_DELAY_H
-#define _ASM_M32R_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/m32r/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
- __udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
-
-#endif /* _ASM_M32R_DELAY_H */
+#include <asm-generic/delay.h>
diff --git a/arch/m32r/kernel/module.c b/arch/m32r/kernel/module.c
index cb5f37d78d49..3071fe83ffc8 100644
--- a/arch/m32r/kernel/module.c
+++ b/arch/m32r/kernel/module.c
@@ -28,33 +28,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-#ifdef CONFIG_MMU
- return vmalloc_exec(size);
-#else
- return vmalloc(size);
-#endif
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
#define COPY_UNALIGNED_WORD(sw, tw, align) \
{ \
void *__s = &(sw), *__t = &(tw); \
@@ -243,14 +216,3 @@ int apply_relocate(Elf32_Shdr *sechdrs,
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/m68k/kernel/module_mm.c b/arch/m68k/kernel/module_mm.c
index cd6bcb1c957e..ceafc47c96d5 100644
--- a/arch/m68k/kernel/module_mm.c
+++ b/arch/m68k/kernel/module_mm.c
@@ -19,29 +19,6 @@
#ifdef CONFIG_MODULES
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -131,10 +108,6 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-void module_arch_cleanup(struct module *mod)
-{
-}
-
#endif /* CONFIG_MODULES */
void module_fixup(struct module *mod, struct m68k_fixup_info *start,
diff --git a/arch/m68k/kernel/module_no.c b/arch/m68k/kernel/module_no.c
index d11ffae7956a..5a097c6063fa 100644
--- a/arch/m68k/kernel/module_no.c
+++ b/arch/m68k/kernel/module_no.c
@@ -11,29 +11,6 @@
#define DEBUGP(fmt...)
#endif
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -113,14 +90,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 0e73f6606547..142426f631bb 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -18,37 +18,6 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
-void *module_alloc(unsigned long size)
-{
- void *ret;
- ret = (size == 0) ? NULL : vmalloc(size);
- pr_debug("module_alloc (%08lx@%08lx)\n", size, (unsigned long int)ret);
- return ret;
-}
-
-void module_free(struct module *module, void *region)
-{
- pr_debug("module_free(%s,%08lx)\n", module->name,
- (unsigned long)region);
- vfree(region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec, struct module *module)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec, struct module *module)
{
@@ -155,7 +124,3 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
flush_dcache();
return 0;
}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 6cb60adb7b30..177cdaf83564 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2493,20 +2493,4 @@ source "security/Kconfig"
source "crypto/Kconfig"
-menuconfig VIRTUALIZATION
- bool "Virtualization"
- default n
- ---help---
- Say Y here to get to see options for using your Linux host to run other
- operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-source drivers/virtio/Kconfig
-
-endif # VIRTUALIZATION
-
source "lib/Kconfig"
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index dd940b701963..4b930ac4aff2 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -45,30 +45,14 @@ static struct mips_hi16 *mips_hi16_list;
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
+#ifdef MODULE_START
void *module_alloc(unsigned long size)
{
-#ifdef MODULE_START
return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
GFP_KERNEL, PAGE_KERNEL, -1,
__builtin_return_address(0));
-#else
- if (size == 0)
- return NULL;
- return vmalloc(size);
-#endif
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *mod)
-{
- return 0;
}
+#endif
static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c
index 196a111e2e29..216ad23c9570 100644
--- a/arch/mn10300/kernel/module.c
+++ b/arch/mn10300/kernel/module.c
@@ -32,36 +32,6 @@
#define DEBUGP(fmt, ...)
#endif
-/*
- * allocate storage for a module
- */
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc_exec(size);
-}
-
-/*
- * free memory returned from module_alloc()
- */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/*
- * allow the arch to fix up the section table
- * - we don't need anything special
- */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
static void reloc_put16(uint8_t *p, uint32_t val)
{
p[0] = val & 0xff;
@@ -81,20 +51,6 @@ static void reloc_put32(uint8_t *p, uint32_t val)
}
/*
- * apply a REL relocation
- */
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
-/*
* apply a RELA relocation
*/
int apply_relocate_add(Elf32_Shdr *sechdrs,
@@ -198,20 +154,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-/*
- * finish loading the module
- */
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-/*
- * finish clearing the module
- */
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
new file mode 100644
index 000000000000..4558bafbd1a2
--- /dev/null
+++ b/arch/openrisc/Kconfig
@@ -0,0 +1,207 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/config-language.txt.
+#
+
+config OPENRISC
+ def_bool y
+ select OF
+ select OF_EARLY_FLATTREE
+ select HAVE_MEMBLOCK
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+ select GENERIC_IOMAP
+
+config MMU
+ def_bool y
+
+config WISHBONE_BUS_BIG_ENDIAN
+ def_bool y
+
+config SYMBOL_PREFIX
+ string
+ default ""
+
+config HAVE_DMA_ATTRS
+ def_bool y
+
+config UID16
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+ def_bool n
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config GENERIC_IOMAP
+ def_bool y
+
+config NO_IOPORT
+ def_bool y
+
+config GENERIC_GPIO
+ def_bool y
+
+config GENERIC_CLOCKEVENTS
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
+# For now, use generic checksum functions
+# These can be reimplemented in assembly later if so inclined
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_FIND_NEXT_BIT
+ def_bool y
+
+source "init/Kconfig"
+
+
+menu "Processor type and features"
+
+choice
+ prompt "Subarchitecture"
+ default OR1K_1200
+
+config OR1K_1200
+ bool "OR1200"
+ help
+ Generic OpenRISC 1200 architecture
+
+endchoice
+
+config OPENRISC_BUILTIN_DTB
+ string "Builtin DTB"
+ default ""
+
+menu "Class II Instructions"
+
+config OPENRISC_HAVE_INST_FF1
+ bool "Have instruction l.ff1"
+ default y
+ help
+ Select this if your implementation has the Class II instruction l.ff1
+
+config OPENRISC_HAVE_INST_FL1
+ bool "Have instruction l.fl1"
+ default y
+ help
+ Select this if your implementation has the Class II instruction l.fl1
+
+config OPENRISC_HAVE_INST_MUL
+ bool "Have instruction l.mul for hardware multiply"
+ default y
+ help
+ Select this if your implementation has a hardware multiply instruction
+
+config OPENRISC_HAVE_INST_DIV
+ bool "Have instruction l.div for hardware divide"
+ default y
+ help
+ Select this if your implementation has a hardware divide instruction
+endmenu
+
+
+source "kernel/time/Kconfig"
+source kernel/Kconfig.hz
+source kernel/Kconfig.preempt
+source "mm/Kconfig"
+
+config OPENRISC_NO_SPR_SR_DSX
+ bool "use SPR_SR_DSX software emulation" if OR1K_1200
+ default y
+ help
+ The SPR_SR_DSX bit is a status register bit indicating whether
+ the last exception happened in a delay slot.
+
+ The OpenRISC architecture makes it optional to implement it in
+ hardware, and the OR1200 does not have it.
+
+ Say N here if you know that your OpenRISC processor has the
+ SPR_SR_DSX bit implemented. Say Y if you are unsure.
+
+config CMDLINE
+ string "Default kernel command string"
+ default ""
+ help
+ On some architectures there is currently no way for the boot loader
+ to pass arguments to the kernel. For these architectures, you should
+ supply some command-line options at build time by entering them
+ here.
+
+menu "Debugging options"
+
+config DEBUG_STACKOVERFLOW
+ bool "Check for kernel stack overflow"
+ default y
+ help
+ Make extra checks for space available on the stack in some
+ critical functions. This will cause the kernel to run a bit slower,
+ but will catch most kernel stack overruns and exit gracefully.
+
+ Say Y if you are unsure.
+
+config JUMP_UPON_UNHANDLED_EXCEPTION
+ bool "Try to die gracefully"
+ default y
+ help
+ This puts the kernel into an infinite loop after the first oops.
+ Until your kernel crashes, this doesn't have any influence.
+
+ Say Y if you are unsure.
+
+config OPENRISC_EXCEPTION_DEBUG
+ bool "Print processor state at each exception"
+ default n
+ help
+ This option will make your kernel unusable for all but kernel
+ debugging.
+
+ Say N if you are unsure.
+
+config OPENRISC_ESR_EXCEPTION_BUG_CHECK
+ bool "Check for possible ESR exception bug"
+ default n
+ help
+ This option enables some checks that might expose some problems
+ in the kernel.
+
+ Say N if you are unsure.
+
+endmenu
+
+endmenu
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
+
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+endmenu
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
new file mode 100644
index 000000000000..158ae4c0dc6c
--- /dev/null
+++ b/arch/openrisc/Makefile
@@ -0,0 +1,55 @@
+# BK Id: %F% %I% %G% %U% %#%
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Modifications for the OpenRISC architecture:
+# Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+# Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+#
+# Based on:
+# arch/i386/Makefile
+
+KBUILD_DEFCONFIG := or1ksim_defconfig
+
+LDFLAGS :=
+OBJCOPYFLAGS := -O binary -R .note -R .comment -S
+LDFLAGS_vmlinux :=
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+
+KBUILD_CFLAGS += -pipe -ffixed-r10
+
+ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y)
+ KBUILD_CFLAGS += $(call cc-option,-mhard-mul)
+else
+ KBUILD_CFLAGS += $(call cc-option,-msoft-mul)
+endif
+
+ifeq ($(CONFIG_OPENRISC_HAVE_INST_DIV),y)
+ KBUILD_CFLAGS += $(call cc-option,-mhard-div)
+else
+ KBUILD_CFLAGS += $(call cc-option,-msoft-div)
+endif
+
+head-y := arch/openrisc/kernel/head.o arch/openrisc/kernel/init_task.o
+
+core-y += arch/openrisc/lib/ \
+ arch/openrisc/kernel/ \
+ arch/openrisc/mm/
+libs-y += $(LIBGCC)
+
+ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""'
+BUILTIN_DTB := y
+else
+BUILTIN_DTB := n
+endif
+core-$(BUILTIN_DTB) += arch/openrisc/boot/
+
+all: vmlinux
diff --git a/arch/openrisc/README.openrisc b/arch/openrisc/README.openrisc
new file mode 100644
index 000000000000..c9f7edf2b9a2
--- /dev/null
+++ b/arch/openrisc/README.openrisc
@@ -0,0 +1,99 @@
+OpenRISC Linux
+==============
+
+This is a port of Linux to the OpenRISC class of microprocessors; the initial
+target architecture, specifically, is the 32-bit OpenRISC 1000 family (or1k).
+
+For information about OpenRISC processors and ongoing development:
+
+ website http://openrisc.net
+
+For more information about Linux on OpenRISC, please contact South Pole AB.
+
+ email: info@southpole.se
+
+ website: http://southpole.se
+ http://southpoleconsulting.com
+
+---------------------------------------------------------------------
+
+Build instructions for OpenRISC toolchain and Linux
+===================================================
+
+In order to build and run Linux for OpenRISC, you'll need at least a basic
+toolchain and, perhaps, the architectural simulator. Steps to get these bits
+in place are outlined here.
+
+1) The toolchain can be obtained from openrisc.net. Instructions for building
+a toolchain can be found at:
+
+http://openrisc.net/toolchain-build.html
+
+2) or1ksim (optional)
+
+or1ksim is the architectural simulator which will allow you to actually run
+your OpenRISC Linux kernel if you don't have an OpenRISC processor at hand.
+
+ git clone git://openrisc.net/jonas/or1ksim-svn
+
+ cd or1ksim
+ ./configure --prefix=$OPENRISC_PREFIX
+ make
+ make install
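+
+ ($OPENRISC_PREFIX above is whichever install prefix you chose for the
+ toolchain; for example, export OPENRISC_PREFIX=/opt/openrisc before
+ configuring.)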
+
+3) Linux kernel
+
+Build the kernel as usual
+
+ make ARCH=openrisc defconfig
+ make ARCH=openrisc
+
+4) Run in architectural simulator
+
+Grab the or1ksim platform configuration file (from the or1ksim source) and
+together with your freshly built vmlinux, run your kernel with the following
+incantation:
+
+ sim -f arch/openrisc/or1ksim.cfg vmlinux
+
+---------------------------------------------------------------------
+
+Terminology
+===========
+
+In the code, the following particles are used on symbols to limit the scope
+to more or less specific processor implementations:
+
+openrisc: the OpenRISC class of processors
+or1k: the OpenRISC 1000 family of processors
+or1200: the OpenRISC 1200 processor
+
+---------------------------------------------------------------------
+
+History
+========
+
+18. 11. 2003 Matjaz Breskvar (phoenix@bsemi.com)
+ initial port of linux to OpenRISC/or32 architecture.
+ all the core stuff is implemented and seems usable.
+
+08. 12. 2003 Matjaz Breskvar (phoenix@bsemi.com)
+ complete change of TLB miss handling.
+ rewrite of exceptions handling.
+ fully functional sash-3.6 in default initrd.
+ a much improved version with changes all around.
+
+10. 04. 2004 Matjaz Breskvar (phoenix@bsemi.com)
+	a lot of bugfixes all over.
+ ethernet support, functional http and telnet servers.
+ running many standard linux apps.
+
+26. 06. 2004 Matjaz Breskvar (phoenix@bsemi.com)
+ port to 2.6.x
+
+30. 11. 2004 Matjaz Breskvar (phoenix@bsemi.com)
+	lots of bugfixes and enhancements.
+ added opencores framebuffer driver.
+
+09. 10. 2010 Jonas Bonn (jonas@southpole.se)
+ major rewrite to bring up to par with upstream Linux 2.6.36
diff --git a/arch/openrisc/TODO.openrisc b/arch/openrisc/TODO.openrisc
new file mode 100644
index 000000000000..acfeef9c58e3
--- /dev/null
+++ b/arch/openrisc/TODO.openrisc
@@ -0,0 +1,16 @@
+The OpenRISC Linux port is fully functional and has been tracking upstream
+since 2.6.35. There are, however, remaining items to be completed within
+the coming months. Here's a list of known-to-be-less-than-stellar items
+that are due for investigation shortly, i.e. our TODO list:
+
+-- Implement the rest of the DMA API... dma_map_sg, etc. (see the sketch
+   below)
+
+-- Consolidate usage of memblock and bootmem... move everything over to
+ memblock.
+
+-- Finish the renaming cleanup... there are still references in the code to
+   or32, an older name for the architecture. The name we've settled on is
+   or1k, and this change is slowly trickling through the stack. For the time
+   being, or32 is equivalent to or1k.
+
+-- Implement optimized versions of memcpy and memset
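
As a rough illustration of the dma_map_sg item above: one plausible shape,
sketched here purely as a hedged example and not as the implementation the
port will actually use, is to loop over the scatterlist and reuse the
or1k_map_page() primitive that this patch declares in asm/dma-mapping.h.
The helper name and body below are assumptions.

/* Hedged sketch only: map each scatterlist segment with the existing
 * single-page primitive declared in asm/dma-mapping.h. Not part of
 * this patch. */
static int or1k_map_sg(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction dir,
		       struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, attrs);

	return nents;
}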
diff --git a/arch/openrisc/boot/Makefile b/arch/openrisc/boot/Makefile
new file mode 100644
index 000000000000..98ca185097a5
--- /dev/null
+++ b/arch/openrisc/boot/Makefile
@@ -0,0 +1,15 @@
+
+
+ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""'
+BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_OPENRISC_BUILTIN_DTB)).dtb.o
+else
+BUILTIN_DTB :=
+endif
+obj-y += $(BUILTIN_DTB)
+
+clean-files := *.dtb.S
+
+#DTC_FLAGS ?= -p 1024
+
+$(obj)/%.dtb: $(src)/dts/%.dts
+ $(call cmd,dtc)
diff --git a/arch/openrisc/boot/dts/or1ksim.dts b/arch/openrisc/boot/dts/or1ksim.dts
new file mode 100644
index 000000000000..5d4f9027afaf
--- /dev/null
+++ b/arch/openrisc/boot/dts/or1ksim.dts
@@ -0,0 +1,50 @@
+/dts-v1/;
+/ {
+ compatible = "opencores,or1ksim";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pic>;
+
+ chosen {
+ bootargs = "console=uart,mmio,0x90000000,115200";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x02000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "opencores,or1200-rtlsvn481";
+ reg = <0>;
+ clock-frequency = <20000000>;
+ };
+ };
+
+ /*
+ * OR1K PIC is built into CPU and accessed via special purpose
+ * registers. It is not addressable and, hence, has no 'reg'
+ * property.
+ */
+ pic: pic {
+ compatible = "opencores,or1k-pic";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
+ serial0: serial@90000000 {
+ compatible = "opencores,uart16550-rtlsvn105", "ns16550a";
+ reg = <0x90000000 0x100>;
+ interrupts = <2>;
+ clock-frequency = <20000000>;
+ };
+
+ enet0: ethoc@92000000 {
+ compatible = "opencores,ethmac-rtlsvn338";
+ reg = <0x92000000 0x100>;
+ interrupts = <4>;
+ };
+};
diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig
new file mode 100644
index 000000000000..ea172bdfa36a
--- /dev/null
+++ b/arch/openrisc/configs/or1ksim_defconfig
@@ -0,0 +1,65 @@
+CONFIG_CROSS_COMPILE="or32-linux-"
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+CONFIG_EXPERT=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_EPOLL is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+CONFIG_MODULES=y
+# CONFIG_BLOCK is not set
+CONFIG_OPENRISC_BUILTIN_DTB="or1ksim"
+CONFIG_NO_HZ=y
+CONFIG_HZ_100=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_NETDEVICES=y
+CONFIG_MICREL_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_ETHOC=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
new file mode 100644
index 000000000000..11162e6c878f
--- /dev/null
+++ b/arch/openrisc/include/asm/Kbuild
@@ -0,0 +1,64 @@
+include include/asm-generic/Kbuild.asm
+
+header-y += spr_defs.h
+
+generic-y += atomic.h
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += cacheflush.h
+generic-y += checksum.h
+generic-y += cmpxchg.h
+generic-y += cmpxchg-local.h
+generic-y += cpumask.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += rmap.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += statfs.h
+generic-y += stat.h
+generic-y += string.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
diff --git a/arch/openrisc/include/asm/asm-offsets.h b/arch/openrisc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/openrisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h
new file mode 100644
index 000000000000..a9e11efae14d
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops.h
@@ -0,0 +1,59 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_BITOPS_H
+#define __ASM_OPENRISC_BITOPS_H
+
+/*
+ * Where we haven't written assembly versions yet, we fall back to the
+ * generic implementations. Otherwise, we pull in our (hopefully)
+ * optimized versions.
+ */
+
+#include <linux/irqflags.h>
+#include <linux/compiler.h>
+
+/*
+ * clear_bit may not imply a memory barrier
+ */
+#ifndef smp_mb__before_clear_bit
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+#endif
+
+#include <asm/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm/bitops/fls.h>
+#include <asm/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_OPENRISC_BITOPS_H */
diff --git a/arch/openrisc/include/asm/bitops/__ffs.h b/arch/openrisc/include/asm/bitops/__ffs.h
new file mode 100644
index 000000000000..6c8368a34059
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/__ffs.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC___FFS_H
+#define __ASM_OPENRISC___FFS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FF1
+
+static inline unsigned long __ffs(unsigned long x)
+{
+ int ret;
+
+ __asm__ ("l.ff1 %0,%1"
+ : "=r" (ret)
+ : "r" (x));
+
+ return ret-1;
+}
+
+#else
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#endif /* __ASM_OPENRISC___FFS_H */
diff --git a/arch/openrisc/include/asm/bitops/__fls.h b/arch/openrisc/include/asm/bitops/__fls.h
new file mode 100644
index 000000000000..c4ecdb4c523b
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/__fls.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC___FLS_H
+#define __ASM_OPENRISC___FLS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FL1
+
+static inline unsigned long __fls(unsigned long x)
+{
+ int ret;
+
+ __asm__ ("l.fl1 %0,%1"
+ : "=r" (ret)
+ : "r" (x));
+
+ return ret-1;
+}
+
+#else
+#include <asm-generic/bitops/__fls.h>
+#endif
+
+#endif /* __ASM_OPENRISC___FLS_H */
diff --git a/arch/openrisc/include/asm/bitops/ffs.h b/arch/openrisc/include/asm/bitops/ffs.h
new file mode 100644
index 000000000000..9de46246ebc7
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/ffs.h
@@ -0,0 +1,32 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FFS_H
+#define __ASM_OPENRISC_FFS_H
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FF1
+
+static inline int ffs(int x)
+{
+ int ret;
+
+ __asm__ ("l.ff1 %0,%1"
+ : "=r" (ret)
+ : "r" (x));
+
+ return ret;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#endif
+
+#endif /* __ASM_OPENRISC_FFS_H */
diff --git a/arch/openrisc/include/asm/bitops/fls.h b/arch/openrisc/include/asm/bitops/fls.h
new file mode 100644
index 000000000000..9efbf9ad86c4
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/fls.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FLS_H
+#define __ASM_OPENRISC_FLS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FL1
+
+static inline int fls(int x)
+{
+ int ret;
+
+ __asm__ ("l.fl1 %0,%1"
+ : "=r" (ret)
+ : "r" (x));
+
+ return ret;
+}
+
+#else
+#include <asm-generic/bitops/fls.h>
+#endif
+
+#endif /* __ASM_OPENRISC_FLS_H */
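
The four headers above all wrap the optional l.ff1/l.fl1 instructions, which
report one-based bit positions; ffs()/fls() return that value directly, while
__ffs()/__fls() subtract one to give zero-based indices. A small hypothetical
self-check (not part of the patch) makes the convention concrete:

/* Hypothetical self-check of the bit-numbering conventions above. */
static void __init or1k_bitops_selftest(void)
{
	/* 0x10 has only bit 4 (zero-based) set */
	BUG_ON(ffs(0x10) != 5);		/* one-based, straight from l.ff1 */
	BUG_ON(__ffs(0x10) != 4);	/* zero-based: l.ff1 result minus one */
	BUG_ON(fls(0x10) != 5);		/* one-based, straight from l.fl1 */
	BUG_ON(__fls(0x10) != 4);	/* zero-based: l.fl1 result minus one */
}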
diff --git a/arch/openrisc/include/asm/byteorder.h b/arch/openrisc/include/asm/byteorder.h
new file mode 100644
index 000000000000..60d14f7e14e2
--- /dev/null
+++ b/arch/openrisc/include/asm/byteorder.h
@@ -0,0 +1 @@
+#include <linux/byteorder/big_endian.h>
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
new file mode 100644
index 000000000000..4ce7a01a252d
--- /dev/null
+++ b/arch/openrisc/include/asm/cache.h
@@ -0,0 +1,29 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_CACHE_H
+#define __ASM_OPENRISC_CACHE_H
+
+/* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+#define L1_CACHE_BYTES 16
+#define L1_CACHE_SHIFT 4
+
+#endif /* __ASM_OPENRISC_CACHE_H */
diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h
new file mode 100644
index 000000000000..917318b6a970
--- /dev/null
+++ b/arch/openrisc/include/asm/cpuinfo.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_CPUINFO_H
+#define __ASM_OPENRISC_CPUINFO_H
+
+struct cpuinfo {
+ u32 clock_frequency;
+
+ u32 icache_size;
+ u32 icache_block_size;
+
+ u32 dcache_size;
+ u32 dcache_block_size;
+};
+
+extern struct cpuinfo cpuinfo;
+
+#endif /* __ASM_OPENRISC_CPUINFO_H */
diff --git a/arch/openrisc/include/asm/delay.h b/arch/openrisc/include/asm/delay.h
new file mode 100644
index 000000000000..17f8bf5a5ac2
--- /dev/null
+++ b/arch/openrisc/include/asm/delay.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_DELAY_H
+#define __ASM_OPENRISC_DELAY_H
+
+#include <asm-generic/delay.h>
+
+extern unsigned long loops_per_jiffy;
+
+#endif
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..052f877b52a5
--- /dev/null
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -0,0 +1,134 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_DMA_MAPPING_H
+#define __ASM_OPENRISC_DMA_MAPPING_H
+
+/*
+ * See Documentation/PCI/PCI-DMA-mapping.txt and
+ * Documentation/DMA-API.txt for documentation.
+ *
+ * This file is written with the intention of eventually moving over
+ * to largely using asm-generic/dma-mapping-common.h in its place.
+ */
+
+#include <linux/dma-debug.h>
+#include <asm-generic/dma-coherent.h>
+#include <linux/kmemcheck.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+dma_addr_t or1k_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+void or1k_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+void or1k_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *memory;
+
+ memory = or1k_dma_alloc_coherent(dev, size, dma_handle, flag);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+ return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ or1k_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ addr = or1k_map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, NULL);
+ debug_dma_map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+ or1k_unmap_page(dev, addr, size, dir, NULL);
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+ or1k_sync_single_for_cpu(dev, addr, size, dir);
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+ or1k_sync_single_for_device(dev, addr, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == 0xffffffffULL;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+#endif /* __ASM_OPENRISC_DMA_MAPPING_H */
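
The inline wrappers above are the whole consistent/streaming DMA surface a
driver sees on this port. A minimal, hypothetical driver fragment (the
device, buffer and length are invented for illustration) might use it like
this:

/* Hypothetical usage sketch of the API declared above; not part of the patch. */
static int example_dma_transfer(struct device *dev, void *data, size_t len)
{
	void *buf;
	dma_addr_t coherent_handle, stream_handle;
	int ret = 0;

	/* long-lived, coherent buffer (e.g. for descriptor rings) */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &coherent_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* stream an existing kernel buffer to the device */
	stream_handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, stream_handle)) {
		ret = -EIO;
		goto out_free;
	}

	/* ... kick off the transfer and wait for it to complete ... */

	dma_unmap_single(dev, stream_handle, len, DMA_TO_DEVICE);
out_free:
	dma_free_coherent(dev, PAGE_SIZE, buf, coherent_handle);
	return ret;
}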
diff --git a/arch/openrisc/include/asm/elf.h b/arch/openrisc/include/asm/elf.h
new file mode 100644
index 000000000000..2ce603bbfdd3
--- /dev/null
+++ b/arch/openrisc/include/asm/elf.h
@@ -0,0 +1,108 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_ELF_H
+#define __ASM_OPENRISC_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+
+/* The OR1K relocation types... not all relevant for module loader */
+#define R_OR32_NONE 0
+#define R_OR32_32 1
+#define R_OR32_16 2
+#define R_OR32_8 3
+#define R_OR32_CONST 4
+#define R_OR32_CONSTH 5
+#define R_OR32_JUMPTARG 6
+#define R_OR32_VTINHERIT 7
+#define R_OR32_VTENTRY 8
+
+typedef unsigned long elf_greg_t;
+
+/*
+ * Note that NGREG is defined to ELF_NGREG in include/linux/elfcore.h, and is
+ * thus exposed to user-space.
+ */
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* A placeholder; OR32 does not have fp support yet, so no fp regs for now. */
+typedef unsigned long elf_fpregset_t;
+
+/* This should be moved to include/linux/elf.h */
+#define EM_OR32 0x8472
+#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_ARCH EM_OR32
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2MSB
+
+#ifdef __KERNEL__
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) \
+ (((x)->e_machine == EM_OR32) || ((x)->e_machine == EM_OPENRISC))
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (0x08000000)
+
+/*
+ * Enable dump using regset.
+ * This covers all of general/DSP/FPU regs.
+ */
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE 8192
+
+extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt);
+#define ELF_CORE_COPY_REGS(dest, regs) dump_elf_thread(dest, regs);
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This could be done in userspace,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (0)
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+   For the moment, there are no implementation-specific optimizations to
+   select between here, but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h
new file mode 100644
index 000000000000..52733416c1f3
--- /dev/null
+++ b/arch/openrisc/include/asm/fixmap.h
@@ -0,0 +1,87 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FIXMAP_H
+#define __ASM_OPENRISC_FIXMAP_H
+
+/* Why exactly do we need 2 empty pages between the top of the fixed
+ * addresses and the top of virtual memory? Something is using that
+ * memory space but not sure what right now... If you find it, leave
+ * a comment here.
+ */
+#define FIXADDR_TOP ((unsigned long) (-2*PAGE_SIZE))
+
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+/*
+ * On OpenRISC we use these special fixed_addresses for doing ioremap
+ * early in the boot process before memory initialization is complete.
+ * This is used, in particular, by the early serial console code.
+ *
+ * It's not really 'fixmap', per se, but fits loosely into the same
+ * paradigm.
+ */
+enum fixed_addresses {
+ /*
+ * FIX_IOREMAP entries are useful for mapping physical address
+ * space before ioremap() is useable, e.g. really early in boot
+ * before kmalloc() is working.
+ */
+#define FIX_N_IOREMAPS 32
+ FIX_IOREMAP_BEGIN,
+ FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1,
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
+/* FIXADDR_BOTTOM might be a better name here... */
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ /*
+ * this branch gets completely eliminated after inlining,
+ * except when someone tries to use fixaddr indices in an
+	 * illegal way (such as mixing up address types or using
+	 * out-of-range indices).
+ *
+ * If it doesn't get removed, the linker will complain
+ * loudly with a reasonably clear error message..
+ */
+ if (idx >= __end_of_fixed_addresses)
+ BUG();
+
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#endif
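
For orientation, the FIX_IOREMAP_* slots above are meant to back early
ioremap before the normal allocators are up. A hedged sketch of how a slot
index becomes a usable virtual address follows; the allocator state and
function are hypothetical, and a real user would still have to install a
PTE for the returned address:

/* Hypothetical sketch: hand out one early-ioremap slot. Not part of the patch. */
static unsigned int next_ioremap_slot;	/* assumed allocator state */

static void __iomem *example_early_ioremap_slot(void)
{
	unsigned int idx = FIX_IOREMAP_BEGIN + next_ioremap_slot++;

	BUG_ON(idx > FIX_IOREMAP_END);

	/* fix_to_virt() turns the index into a fixed virtual address */
	return (void __iomem *)fix_to_virt(idx);
}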
diff --git a/arch/openrisc/include/asm/gpio.h b/arch/openrisc/include/asm/gpio.h
new file mode 100644
index 000000000000..0b0d174f47cd
--- /dev/null
+++ b/arch/openrisc/include/asm/gpio.h
@@ -0,0 +1,65 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_GPIO_H
+#define __ASM_OPENRISC_GPIO_H
+
+#include <linux/errno.h>
+#include <asm-generic/gpio.h>
+
+#ifdef CONFIG_GPIOLIB
+
+/*
+ * OpenRISC (or1k) does not have on-chip GPIOs, so there is not really
+ * any standardized implementation that makes sense here. If passing
+ * through gpiolib becomes a bottleneck then it may make sense, on a
+ * case-by-case basis, to implement these inlined/rapid versions.
+ *
+ * Just call gpiolib.
+ */
+static inline int gpio_get_value(unsigned int gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
+/*
+ * Not implemented, yet.
+ */
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return -ENOSYS;
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+#endif /* __ASM_OPENRISC_GPIO_H */
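
Because everything above just funnels into gpiolib, consumer code looks
exactly like generic kernel GPIO usage. A short hypothetical example (the
GPIO number and label are invented):

/* Hypothetical consumer of the gpiolib-backed calls above; not part of the patch. */
static int example_read_button(void)
{
	int ret;

	ret = gpio_request(5, "example-button");	/* GPIO number is made up */
	if (ret)
		return ret;

	gpio_direction_input(5);
	ret = gpio_get_value(5);	/* ends up in __gpio_get_value() */

	gpio_free(5);
	return ret;
}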
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
new file mode 100644
index 000000000000..07f5299d6c28
--- /dev/null
+++ b/arch/openrisc/include/asm/io.h
@@ -0,0 +1,51 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_IO_H
+#define __ASM_OPENRISC_IO_H
+
+/*
+ * PCI: can we really do 0 here if we have no port IO?
+ */
+#define IO_SPACE_LIMIT 0
+
+/* OpenRISC has no port IO */
+#define HAVE_ARCH_PIO_SIZE 1
+#define PIO_RESERVED 0X0UL
+#define PIO_OFFSET 0
+#define PIO_MASK 0
+
+#include <asm-generic/io.h>
+
+extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size,
+ pgprot_t prot);
+
+static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
+{
+ return __ioremap(offset, size, PAGE_KERNEL);
+}
+
+/* #define _PAGE_CI 0x002 */
+static inline void __iomem *ioremap_nocache(phys_addr_t offset,
+ unsigned long size)
+{
+ return __ioremap(offset, size,
+ __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI));
+}
+
+extern void iounmap(void *addr);
+#endif
diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h
new file mode 100644
index 000000000000..eb612b1865d2
--- /dev/null
+++ b/arch/openrisc/include/asm/irq.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_IRQ_H__
+#define __ASM_OPENRISC_IRQ_H__
+
+#define NR_IRQS 32
+#include <asm-generic/irq.h>
+
+#define NO_IRQ (-1)
+
+#endif /* __ASM_OPENRISC_IRQ_H__ */
diff --git a/arch/openrisc/include/asm/irqflags.h b/arch/openrisc/include/asm/irqflags.h
new file mode 100644
index 000000000000..dc86c653d70b
--- /dev/null
+++ b/arch/openrisc/include/asm/irqflags.h
@@ -0,0 +1,29 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ___ASM_OPENRISC_IRQFLAGS_H
+#define ___ASM_OPENRISC_IRQFLAGS_H
+
+#include <asm/spr_defs.h>
+
+#define ARCH_IRQ_DISABLED 0x00
+#define ARCH_IRQ_ENABLED (SPR_SR_IEE|SPR_SR_TEE)
+
+#include <asm-generic/irqflags.h>
+
+#endif /* ___ASM_OPENRISC_IRQFLAGS_H */
diff --git a/arch/openrisc/include/asm/linkage.h b/arch/openrisc/include/asm/linkage.h
new file mode 100644
index 000000000000..e2638752091a
--- /dev/null
+++ b/arch/openrisc/include/asm/linkage.h
@@ -0,0 +1,25 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_LINKAGE_H
+#define __ASM_OPENRISC_LINKAGE_H
+
+#define __ALIGN .align 0
+#define __ALIGN_STR ".align 0"
+
+#endif /* __ASM_OPENRISC_LINKAGE_H */
diff --git a/arch/openrisc/include/asm/memblock.h b/arch/openrisc/include/asm/memblock.h
new file mode 100644
index 000000000000..bbe5a1c788cb
--- /dev/null
+++ b/arch/openrisc/include/asm/memblock.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MEMBLOCK_H
+#define __ASM_OPENRISC_MEMBLOCK_H
+
+/* empty */
+
+#endif /* __ASM_OPENRISC_MEMBLOCK_H */
diff --git a/arch/openrisc/include/asm/mmu.h b/arch/openrisc/include/asm/mmu.h
new file mode 100644
index 000000000000..d069bc2ddfa4
--- /dev/null
+++ b/arch/openrisc/include/asm/mmu.h
@@ -0,0 +1,26 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MMU_H
+#define __ASM_OPENRISC_MMU_H
+
+#ifndef __ASSEMBLY__
+typedef unsigned long mm_context_t;
+#endif
+
+#endif
diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h
new file mode 100644
index 000000000000..e94b814d2e3c
--- /dev/null
+++ b/arch/openrisc/include/asm/mmu_context.h
@@ -0,0 +1,43 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MMU_CONTEXT_H
+#define __ASM_OPENRISC_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
+extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk);
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+
+/* current active pgd - this is similar to other processors pgd
+ * registers like cr3 on the i386
+ */
+
+extern volatile pgd_t *current_pgd; /* defined in arch/openrisc/mm/fault.c */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#endif
diff --git a/arch/openrisc/include/asm/mutex.h b/arch/openrisc/include/asm/mutex.h
new file mode 100644
index 000000000000..b85a0cfa9fc9
--- /dev/null
+++ b/arch/openrisc/include/asm/mutex.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h
new file mode 100644
index 000000000000..b041b344b229
--- /dev/null
+++ b/arch/openrisc/include/asm/page.h
@@ -0,0 +1,110 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PAGE_H
+#define __ASM_OPENRISC_PAGE_H
+
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT 13
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define PAGE_OFFSET 0xc0000000
+#define KERNELBASE PAGE_OFFSET
+
+/* This is not necessarily the right place for this, but it's needed by
+ * drivers/of/fdt.c
+ */
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif /* !__ASSEMBLY__ */
+
+
+#ifndef __ASSEMBLY__
+
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
+#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(addr) \
+ (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
+#define page_to_virt(page) \
+ ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+
+#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
+ ((void *)(kaddr) < (void *)memory_end))
+
+#endif /* __ASSEMBLY__ */
+
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASM_OPENRISC_PAGE_H */
diff --git a/arch/openrisc/include/asm/param.h b/arch/openrisc/include/asm/param.h
new file mode 100644
index 000000000000..c39a336610e2
--- /dev/null
+++ b/arch/openrisc/include/asm/param.h
@@ -0,0 +1,26 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PARAM_H
+#define __ASM_OPENRISC_PARAM_H
+
+#define EXEC_PAGESIZE 8192
+
+#include <asm-generic/param.h>
+
+#endif /* __ASM_OPENRISC_PARAM_H */
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
new file mode 100644
index 000000000000..05c39ecd2efd
--- /dev/null
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -0,0 +1,102 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PGALLOC_H
+#define __ASM_OPENRISC_PGALLOC_H
+
+#include <asm/page.h>
+#include <linux/threads.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+
+extern int mem_init_done;
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ struct page *pte)
+{
+ set_pmd(pmd, __pmd(_KERNPG_TABLE +
+ ((unsigned long)page_to_pfn(pte) <<
+ (unsigned long) PAGE_SHIFT)));
+}
+
+/*
+ * Allocate and free page tables.
+ */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if (ret) {
+ memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(ret + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+ }
+ return ret;
+}
+
+#if 0
+/* FIXME: This seems to be the preferred style, but we are using
+ * current_pgd (from mm->pgd) to load kernel pages so we need it
+ * initialized. This needs to be looked into.
+ */
+extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
+}
+#endif
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *pte;
+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ if (pte)
+ clear_page(page_address(pte));
+ return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ __free_page(pte);
+}
+
+
+#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#define check_pgt_cache() do { } while (0)
+
+#endif
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
new file mode 100644
index 000000000000..043505d7f684
--- /dev/null
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -0,0 +1,463 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* or32 pgtable.h - macros and functions to manipulate page tables
+ *
+ * Based on:
+ * include/asm-cris/pgtable.h
+ */
+
+#ifndef __ASM_OPENRISC_PGTABLE_H
+#define __ASM_OPENRISC_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/mmu.h>
+#include <asm/fixmap.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * or32, we use that, but "fold" the mid level into the top-level page
+ * table. Since the MMU TLB is software loaded through an interrupt, it
+ * supports any page table structure, so we could have used a three-level
+ * setup, but for the amounts of memory we normally use, a two-level is
+ * probably more efficient.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the or32 page table tree.
+ */
+
+extern void paging_init(void);
+
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-2))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: we use a two-level, so
+ * we don't really have any PMD directory physically.
+ * pointers are 4 bytes so we can use the page size and
+ * divide it by 4 (shift by 2).
+ */
+#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-2))
+
+#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-2))
+
+/* Calculate how many PGD entries a user-level program can use.
+ * The first mappable virtual address is 0
+ * (TASK_SIZE is the maximum virtual address space).
+ */
+
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0
+
+/*
+ * Kernel's own virtual memory area.
+ */
+
+/*
+ * The size and location of the vmalloc area are chosen so that modules
+ * placed in this area aren't more than a 28-bit signed offset from any
+ * kernel functions that they may need. This greatly simplifies handling
+ * of the relocations for l.j and l.jal instructions as we don't need to
+ * introduce any trampolines for reaching "distant" code.
+ *
+ * 64 MB of vmalloc area is comparable to what's available on other arches.
+ */
+
+#define VMALLOC_START (PAGE_OFFSET-0x04000000)
+#define VMALLOC_END (PAGE_OFFSET)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
+
+/* Define some higher level generic page attributes.
+ *
+ * If you change _PAGE_CI definition be sure to change it in
+ * io.h for ioremap_nocache() too.
+ */
+
+/*
+ * An OR32 PTE looks like this:
+ *
+ * | 31 ... 10 | 9 | 8 ... 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * Phys pg.num L PP Index D A WOM WBC CI CC
+ *
+ * L : link
+ * PPI: Page protection index
+ * D : Dirty
+ * A : Accessed
+ * WOM: Weakly ordered memory
+ * WBC: Write-back cache
+ * CI : Cache inhibit
+ * CC : Cache coherent
+ *
+ * The protection bits below should correspond to the layout of the actual
+ * PTE as per above
+ */
+
+#define _PAGE_CC 0x001 /* software: pte contains a translation */
+#define _PAGE_CI 0x002 /* cache inhibit */
+#define _PAGE_WBC 0x004 /* write back cache */
+#define _PAGE_FILE 0x004 /* set: pagecache, unset: swap (when !PRESENT) */
+#define _PAGE_WOM 0x008 /* weakly ordered memory */
+
+#define _PAGE_A 0x010 /* accessed */
+#define _PAGE_D 0x020 /* dirty */
+#define _PAGE_URE 0x040 /* user read enable */
+#define _PAGE_UWE 0x080 /* user write enable */
+
+#define _PAGE_SRE 0x100 /* superuser read enable */
+#define _PAGE_SWE 0x200 /* superuser write enable */
+#define _PAGE_EXEC 0x400 /* software: page is executable */
+#define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */
+
+/* 0x001 is cache coherency bit, which should always be set to
+ * 1 - for SMP (when we support it)
+ * 0 - otherwise
+ *
+ * we just reuse this bit in software for _PAGE_PRESENT and
+ * force it to 0 when loading it into TLB.
+ */
+#define _PAGE_PRESENT _PAGE_CC
+#define _PAGE_USER _PAGE_URE
+#define _PAGE_WRITE (_PAGE_UWE | _PAGE_SWE)
+#define _PAGE_DIRTY _PAGE_D
+#define _PAGE_ACCESSED _PAGE_A
+#define _PAGE_NO_CACHE _PAGE_CI
+#define _PAGE_SHARED _PAGE_U_SHARED
+#define _PAGE_READ (_PAGE_URE | _PAGE_SRE)
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_ALL (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _KERNPG_TABLE \
+ (_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE __pgprot(_PAGE_ALL)
+#define PAGE_READONLY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
+#define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
+#define PAGE_SHARED \
+ __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
+ | _PAGE_SHARED)
+#define PAGE_SHARED_X \
+ __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
+ | _PAGE_SHARED | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
+#define PAGE_COPY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
+
+#define PAGE_KERNEL \
+ __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
+ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_KERNEL_RO \
+ __pgprot(_PAGE_ALL | _PAGE_SRE \
+ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_KERNEL_NOCACHE \
+ __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
+ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY_X
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY_X
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY_X
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED_X
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED_X
+
+/* zero page used for uninitialized stuff */
+extern unsigned long empty_zero_page[2048];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+/* number of bits that fit into a memory pointer */
+#define BITS_PER_PTR (8*sizeof(unsigned long))
+
+/* to align the pointer to a pointer address */
+#define PTR_MASK (~(sizeof(void *)-1))
+
+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
+/* 64-bit machines, beware! SRB. */
+#define SIZEOF_PTR_LOG2 2
+
+/* to find an entry in a page-table */
+#define PAGE_PTR(address) \
+((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+
+/* to set the page-dir */
+#define SET_PAGE_DIR(tsk, pgdir)
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, xp) do { pte_val(*(xp)) = 0; } while (0)
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE)
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_WRITE);
+ return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_READ);
+ return pte;
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_EXEC);
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_DIRTY);
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~(_PAGE_ACCESSED);
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_READ;
+ return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_EXEC;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_ACCESSED;
+ return pte;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+/* What actually goes as arguments to the various functions is less than
+ * obvious, but a rule of thumb is that struct pages go as struct page *,
+ * real physical DRAM addresses are unsigned longs, and DRAM "virtual"
+ * addresses (the 0xc0xxxxxx ones) go as void *.
+ */
+
+static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
+{
+ pte_t pte;
+ /* the PTE needs a physical address */
+ pte_val(pte) = __pa(page) | pgprot_val(pgprot);
+ return pte;
+}
+
+#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
+
+#define mk_pte_phys(physpage, pgprot) \
+({ \
+ pte_t __pte; \
+ \
+ pte_val(__pte) = (physpage) + pgprot_val(pgprot); \
+ __pte; \
+})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+
+/*
+ * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval
+ * __pte_page(pte_val) refers to the "virtual" DRAM interval
+ * pte_pagenr refers to the page-number counted starting from the virtual
+ * DRAM start
+ */
+
+static inline unsigned long __pte_page(pte_t pte)
+{
+ /* the PTE contains a physical address */
+ return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
+}
+
+#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+/* permanent address of a page */
+
+#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define pte_page(pte) (mem_map+pte_pagenr(pte))
+
+/*
+ * only the pte's themselves need to point to physical DRAM (see above)
+ * the pagetable links are purely handled within the kernel SW and thus
+ * don't need the __pa and __va transformations.
+ */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+ pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep;
+}
+
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+#define __pgd_offset(address) pgd_index(address)
+
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define __pmd_offset(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define __pte_offset(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
+#define pte_offset_map_nested(dir, address) \
+ pte_offset_map(dir, address)
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+#define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
+
+#define pte_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \
+ __FILE__, __LINE__, &(e), pte_val(e))
+#define pgd_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e))
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
+
+/*
+ * or32 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Actually, I am not sure what this could be used for.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte)
+{
+}
+
+/* __PHX__ FIXME, SWAP, this probably doesn't work */
+
+/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
+/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */
+
+#define __swp_type(x) (((x).val >> 5) & 0x7f)
+#define __swp_offset(x) ((x).val >> 12)
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+/* Encode and decode a nonlinear file mapping entry */
+
+#define PTE_FILE_MAX_BITS 26
+#define pte_to_pgoff(x) (pte_val(x) >> 6)
+#define pgoff_to_pte(x) __pte(((x) << 6) | _PAGE_FILE)
+
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+#define io_remap_page_range remap_page_range
+
+typedef pte_t *pte_addr_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PGTABLE_H */
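A rough usage sketch for the pte helpers above (illustrative only, not part of
the patch; pfn_pte() is defined above, while PAGE_KERNEL is assumed to come
from the protection-bit definitions earlier in this header):

static inline pte_t example_mk_dirty_pte(unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	/* page frame + protection bits */

	pte = pte_mkwrite(pte);			/* set _PAGE_WRITE */
	pte = pte_mkdirty(pte);			/* set _PAGE_DIRTY */
	pte = pte_mkyoung(pte);			/* set _PAGE_ACCESSED */
	return pte;
}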
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
new file mode 100644
index 000000000000..bb54c97b9783
--- /dev/null
+++ b/arch/openrisc/include/asm/processor.h
@@ -0,0 +1,113 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PROCESSOR_H
+#define __ASM_OPENRISC_PROCESSOR_H
+
+#include <asm/spr_defs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+/* Kernel and user SR register setting */
+#define KERNEL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+ | SPR_SR_DCE | SPR_SR_SM)
+#define USER_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+ | SPR_SR_DCE | SPR_SR_IEE | SPR_SR_TEE)
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/*
+ * User space process size. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+
+#define TASK_SIZE (0x80000000UL)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+struct thread_struct {
+};
+
+/*
+ * At user->kernel entry, the pt_regs struct is stacked on the top of the
+ * kernel-stack. This macro allows us to find those regs for a task.
+ * Notice that subsequent pt_regs stackings, like recursive interrupts
+ * occurring while we're in the kernel, won't affect this - only the first
+ * user->kernel transition registers are reached by this (i.e. not regs
+ * for running signal handler)
+ */
+#define user_regs(thread_info) (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE - STACK_FRAME_OVERHEAD)) - 1)
+
+/*
+ * Ditto, but for the currently running task
+ */
+
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+#define current_regs() user_regs(current_thread_info())
+
+extern inline void prepare_to_copy(struct task_struct *tsk)
+{
+}
+
+#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+
+#define INIT_THREAD { }
+
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
+void release_thread(struct task_struct *);
+unsigned long get_wchan(struct task_struct *p);
+
+/*
+ * Free current thread data structures etc..
+ */
+
+extern inline void exit_thread(void)
+{
+ /* Nothing needs to be done. */
+}
+
+/*
+ * Return saved PC of a blocked thread. For now, this is the "user" PC
+ */
+extern unsigned long thread_saved_pc(struct task_struct *t);
+
+#define init_stack (init_thread_union.stack)
+
+#define cpu_relax() do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PROCESSOR_H */
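A rough sketch of how the accessors above are meant to be used (illustrative
only, not part of the patch): task_pt_regs() finds the pt_regs saved at the
first user->kernel transition, so a blocked task's user-mode context can be
read directly from it.

static inline void example_show_user_context(struct task_struct *tsk,
					     unsigned long *pc,
					     unsigned long *sp)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	*pc = regs->pc;		/* what KSTK_EIP(tsk) evaluates to */
	*sp = regs->sp;		/* what KSTK_ESP(tsk) evaluates to */
}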
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
new file mode 100644
index 000000000000..e1f3fe26606c
--- /dev/null
+++ b/arch/openrisc/include/asm/prom.h
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
+
+#ifndef _ASM_OPENRISC_PROM_H
+#define _ASM_OPENRISC_PROM_H
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <linux/of_irq.h>
+#include <linux/of_fdt.h>
+#include <linux/of_address.h>
+#include <linux/proc_fs.h>
+#include <linux/platform_device.h>
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+/* Other Prototypes */
+extern int early_uartlite_console(void);
+
+/* Parse the ibm,dma-window property of an OF node into the busno, phys and
+ * size parameters.
+ */
+void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
+ unsigned long *busno, unsigned long *phys, unsigned long *size);
+
+extern void kdump_move_device_tree(void);
+
+/* CPU OF node matching */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+
+/* Get the MAC address */
+extern const void *of_get_mac_address(struct device_node *np);
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev: the device whose interrupt is to be resolved
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until a device-node is found, at which point it will finish
+ * resolving using the OF tree walking.
+ */
+struct pci_dev;
+extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+
+/* This routine is here to provide compatibility with how powerpc
+ * handles IRQ mapping for OF device nodes. We precompute and permanently
+ * register them in the platform_device objects, whereas powerpc computes them
+ * on request.
+ */
+static inline void irq_dispose_mapping(unsigned int virq)
+{
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_OPENRISC_PROM_H */
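A rough sketch of the of_irq_map_pci() usage described in the kernel-doc above
(illustrative only, not part of the patch; irq_create_of_mapping() and the
struct of_irq layout are assumed from the linux/of_irq.h of this era):

static inline int example_pci_irq(struct pci_dev *pdev)
{
	struct of_irq oirq;

	if (of_irq_map_pci(pdev, &oirq))
		return -1;			/* no interrupt mapping found */

	/* turn the controller/specifier pair into a Linux IRQ number */
	return irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size);
}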
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
new file mode 100644
index 000000000000..054537c5f9c9
--- /dev/null
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -0,0 +1,131 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PTRACE_H
+#define __ASM_OPENRISC_PTRACE_H
+
+#include <asm/spr_defs.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * This is the layout of the regset returned by the PTRACE_GETREGSET call
+ */
+struct user_regs_struct {
+ /* GPR R0-R31... */
+ unsigned long gpr[32];
+ unsigned long pc;
+ unsigned long sr;
+ unsigned long pad1;
+ unsigned long pad2;
+};
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * Make kernel PTrace/register structures opaque to userspace... userspace can
+ * access thread state via the regset mechanism. This allows us a bit of
+ * flexibility in how we order the registers on the stack, permitting some
+ * optimizations like packing call-clobbered registers together so that
+ * they share a cacheline (not done yet, though... future optimization).
+ */
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct describes how the registers are laid out on the kernel stack
+ * during a syscall or other kernel entry.
+ *
+ * This structure should always be cacheline aligned on the stack.
+ * FIXME: I don't think that's the case right now. The alignment is
+ * taken care of elsewhere... head.S, process.c, etc.
+ */
+
+struct pt_regs {
+ union {
+ struct {
+ /* Named registers */
+ long sr; /* Stored in place of r0 */
+ long sp; /* r1 */
+ };
+ struct {
+ /* Old style */
+ long offset[2];
+ long gprs[30];
+ };
+ struct {
+ /* New style */
+ long gpr[32];
+ };
+ };
+ long pc;
+ long orig_gpr11; /* For restarting system calls */
+ long syscallno; /* Syscall number (used by strace) */
+ long dummy; /* Cheap alignment fix */
+};
+#endif /* __ASSEMBLY__ */
+
+/* TODO: Rename this to REDZONE because that's what it is */
+#define STACK_FRAME_OVERHEAD 128 /* size of minimum stack frame */
+
+#define instruction_pointer(regs) ((regs)->pc)
+#define user_mode(regs) (((regs)->sr & SPR_SR_SM) == 0)
+#define user_stack_pointer(regs) ((unsigned long)(regs)->sp)
+#define profile_pc(regs) instruction_pointer(regs)
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ */
+#define PT_SR 0
+#define PT_SP 4
+#define PT_GPR2 8
+#define PT_GPR3 12
+#define PT_GPR4 16
+#define PT_GPR5 20
+#define PT_GPR6 24
+#define PT_GPR7 28
+#define PT_GPR8 32
+#define PT_GPR9 36
+#define PT_GPR10 40
+#define PT_GPR11 44
+#define PT_GPR12 48
+#define PT_GPR13 52
+#define PT_GPR14 56
+#define PT_GPR15 60
+#define PT_GPR16 64
+#define PT_GPR17 68
+#define PT_GPR18 72
+#define PT_GPR19 76
+#define PT_GPR20 80
+#define PT_GPR21 84
+#define PT_GPR22 88
+#define PT_GPR23 92
+#define PT_GPR24 96
+#define PT_GPR25 100
+#define PT_GPR26 104
+#define PT_GPR27 108
+#define PT_GPR28 112
+#define PT_GPR29 116
+#define PT_GPR30 120
+#define PT_GPR31 124
+#define PT_PC 128
+#define PT_ORIG_GPR11 132
+#define PT_SYSCALLNO 136
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_OPENRISC_PTRACE_H */
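As a rough consistency sketch (illustrative only, not part of the patch), the
PT_* offsets above are intended to mirror the struct pt_regs layout and could
be asserted at build time; BUILD_BUG_ON() and offsetof() are assumed to come
from linux/kernel.h and linux/stddef.h.

static inline void example_check_pt_offsets(void)
{
	BUILD_BUG_ON(PT_SR != offsetof(struct pt_regs, sr));
	BUILD_BUG_ON(PT_SP != offsetof(struct pt_regs, sp));
	BUILD_BUG_ON(PT_PC != offsetof(struct pt_regs, pc));
	BUILD_BUG_ON(PT_ORIG_GPR11 != offsetof(struct pt_regs, orig_gpr11));
	BUILD_BUG_ON(PT_SYSCALLNO != offsetof(struct pt_regs, syscallno));
}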
diff --git a/arch/openrisc/include/asm/serial.h b/arch/openrisc/include/asm/serial.h
new file mode 100644
index 000000000000..270a45241639
--- /dev/null
+++ b/arch/openrisc/include/asm/serial.h
@@ -0,0 +1,36 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SERIAL_H
+#define __ASM_OPENRISC_SERIAL_H
+
+#ifdef __KERNEL__
+
+#include <asm/cpuinfo.h>
+
+/* There's a generic version of this file, but it assumes a 1.8432MHz UART clk...
+ * this, on the other hand, assumes the UART clock is tied to the system
+ * clock... 8250_early.c (early 8250 serial console) actually uses this, so
+ * it needs to be correct to get the early console working.
+ */
+
+#define BASE_BAUD (cpuinfo.clock_frequency/16)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_OPENRISC_SERIAL_H */
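A rough sketch of how BASE_BAUD is consumed (illustrative only, not part of
the patch): the 8250 code programs its divisor latch with uartclk/(16*baud),
which with the definition above reduces to BASE_BAUD/baud.

static inline unsigned int example_uart_divisor(unsigned int baud)
{
	return (BASE_BAUD + baud / 2) / baud;	/* rounded BASE_BAUD / baud */
}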
diff --git a/arch/openrisc/include/asm/sigcontext.h b/arch/openrisc/include/asm/sigcontext.h
new file mode 100644
index 000000000000..54a5c50132e3
--- /dev/null
+++ b/arch/openrisc/include/asm/sigcontext.h
@@ -0,0 +1,38 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SIGCONTEXT_H
+#define __ASM_OPENRISC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/* This struct is saved by setup_frame in signal.c, to keep the current
+ context while a signal handler is executed. It's restored by sys_sigreturn.
+
+ To keep things simple, we use pt_regs here even though normally you just
+ specify the list of regs to save. Then we can use copy_from_user on the
+ entire regs instead of a bunch of get_user's as well...
+*/
+
+struct sigcontext {
+ struct pt_regs regs; /* needs to be first */
+ unsigned long oldmask;
+ unsigned long usp; /* usp before stacking this gunk on it */
+};
+
+#endif /* __ASM_OPENRISC_SIGCONTEXT_H */
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
new file mode 100644
index 000000000000..fd00a3a24123
--- /dev/null
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SPINLOCK_H
+#define __ASM_OPENRISC_SPINLOCK_H
+
+#error "or32 doesn't do SMP yet"
+
+#endif
diff --git a/arch/openrisc/include/asm/spr.h b/arch/openrisc/include/asm/spr.h
new file mode 100644
index 000000000000..1cccb42dd477
--- /dev/null
+++ b/arch/openrisc/include/asm/spr.h
@@ -0,0 +1,42 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SPR_H
+#define __ASM_OPENRISC_SPR_H
+
+#define mtspr(_spr, _val) __asm__ __volatile__ ( \
+ "l.mtspr r0,%1,%0" \
+ : : "K" (_spr), "r" (_val))
+#define mtspr_off(_spr, _off, _val) __asm__ __volatile__ ( \
+ "l.mtspr %0,%1,%2" \
+ : : "r" (_off), "r" (_val), "K" (_spr))
+
+static inline unsigned long mfspr(unsigned long add)
+{
+ unsigned long ret;
+ __asm__ __volatile__ ("l.mfspr %0,r0,%1" : "=r" (ret) : "K" (add));
+ return ret;
+}
+
+static inline unsigned long mfspr_off(unsigned long add, unsigned long offset)
+{
+ unsigned long ret;
+ __asm__ __volatile__ ("l.mfspr %0,%1,%2" : "=r" (ret)
+ : "r" (offset), "K" (add));
+ return ret;
+}
+
+#endif
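A rough usage sketch for the accessors above (illustrative only, not part of
the patch; SPR_SR and SPR_SR_ICE come from <asm/spr_defs.h>): a
read-modify-write of the supervision register to enable the instruction cache.

static inline void example_enable_icache(void)
{
	unsigned long sr = mfspr(SPR_SR);

	mtspr(SPR_SR, sr | SPR_SR_ICE);
}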
diff --git a/arch/openrisc/include/asm/spr_defs.h b/arch/openrisc/include/asm/spr_defs.h
new file mode 100644
index 000000000000..5dbc668865c4
--- /dev/null
+++ b/arch/openrisc/include/asm/spr_defs.h
@@ -0,0 +1,604 @@
+/*
+ * OpenRISC Linux
+ *
+ * SPR Definitions
+ *
+ * Copyright (C) 2000 Damjan Lampret
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2008, 2010 Embecosm Limited
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This file is part of OpenRISC 1000 Architectural Simulator.
+ */
+
+#ifndef SPR_DEFS__H
+#define SPR_DEFS__H
+
+/* Definition of special-purpose registers (SPRs). */
+
+#define MAX_GRPS (32)
+#define MAX_SPRS_PER_GRP_BITS (11)
+#define MAX_SPRS_PER_GRP (1 << MAX_SPRS_PER_GRP_BITS)
+#define MAX_SPRS (0x10000)
+
+/* Base addresses for the groups */
+#define SPRGROUP_SYS (0 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_DMMU (1 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_IMMU (2 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_DC (3 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_IC (4 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_MAC (5 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_D (6 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PC (7 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PM (8 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PIC (9 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_TT (10 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_FP (11 << MAX_SPRS_PER_GRP_BITS)
+
+/* System control and status group */
+#define SPR_VR (SPRGROUP_SYS + 0)
+#define SPR_UPR (SPRGROUP_SYS + 1)
+#define SPR_CPUCFGR (SPRGROUP_SYS + 2)
+#define SPR_DMMUCFGR (SPRGROUP_SYS + 3)
+#define SPR_IMMUCFGR (SPRGROUP_SYS + 4)
+#define SPR_DCCFGR (SPRGROUP_SYS + 5)
+#define SPR_ICCFGR (SPRGROUP_SYS + 6)
+#define SPR_DCFGR (SPRGROUP_SYS + 7)
+#define SPR_PCCFGR (SPRGROUP_SYS + 8)
+#define SPR_NPC (SPRGROUP_SYS + 16) /* CZ 21/06/01 */
+#define SPR_SR (SPRGROUP_SYS + 17) /* CZ 21/06/01 */
+#define SPR_PPC (SPRGROUP_SYS + 18) /* CZ 21/06/01 */
+#define SPR_FPCSR (SPRGROUP_SYS + 20) /* CZ 21/06/01 */
+#define SPR_EPCR_BASE (SPRGROUP_SYS + 32) /* CZ 21/06/01 */
+#define SPR_EPCR_LAST (SPRGROUP_SYS + 47) /* CZ 21/06/01 */
+#define SPR_EEAR_BASE (SPRGROUP_SYS + 48)
+#define SPR_EEAR_LAST (SPRGROUP_SYS + 63)
+#define SPR_ESR_BASE (SPRGROUP_SYS + 64)
+#define SPR_ESR_LAST (SPRGROUP_SYS + 79)
+#define SPR_GPR_BASE (SPRGROUP_SYS + 1024)
+
+/* Data MMU group */
+#define SPR_DMMUCR (SPRGROUP_DMMU + 0)
+#define SPR_DTLBEIR (SPRGROUP_DMMU + 2)
+#define SPR_DTLBMR_BASE(WAY) (SPRGROUP_DMMU + 0x200 + (WAY) * 0x100)
+#define SPR_DTLBMR_LAST(WAY) (SPRGROUP_DMMU + 0x27f + (WAY) * 0x100)
+#define SPR_DTLBTR_BASE(WAY) (SPRGROUP_DMMU + 0x280 + (WAY) * 0x100)
+#define SPR_DTLBTR_LAST(WAY) (SPRGROUP_DMMU + 0x2ff + (WAY) * 0x100)
+
+/* Instruction MMU group */
+#define SPR_IMMUCR (SPRGROUP_IMMU + 0)
+#define SPR_ITLBEIR (SPRGROUP_IMMU + 2)
+#define SPR_ITLBMR_BASE(WAY) (SPRGROUP_IMMU + 0x200 + (WAY) * 0x100)
+#define SPR_ITLBMR_LAST(WAY) (SPRGROUP_IMMU + 0x27f + (WAY) * 0x100)
+#define SPR_ITLBTR_BASE(WAY) (SPRGROUP_IMMU + 0x280 + (WAY) * 0x100)
+#define SPR_ITLBTR_LAST(WAY) (SPRGROUP_IMMU + 0x2ff + (WAY) * 0x100)
+
+/* Data cache group */
+#define SPR_DCCR (SPRGROUP_DC + 0)
+#define SPR_DCBPR (SPRGROUP_DC + 1)
+#define SPR_DCBFR (SPRGROUP_DC + 2)
+#define SPR_DCBIR (SPRGROUP_DC + 3)
+#define SPR_DCBWR (SPRGROUP_DC + 4)
+#define SPR_DCBLR (SPRGROUP_DC + 5)
+#define SPR_DCR_BASE(WAY) (SPRGROUP_DC + 0x200 + (WAY) * 0x200)
+#define SPR_DCR_LAST(WAY) (SPRGROUP_DC + 0x3ff + (WAY) * 0x200)
+
+/* Instruction cache group */
+#define SPR_ICCR (SPRGROUP_IC + 0)
+#define SPR_ICBPR (SPRGROUP_IC + 1)
+#define SPR_ICBIR (SPRGROUP_IC + 2)
+#define SPR_ICBLR (SPRGROUP_IC + 3)
+#define SPR_ICR_BASE(WAY) (SPRGROUP_IC + 0x200 + (WAY) * 0x200)
+#define SPR_ICR_LAST(WAY) (SPRGROUP_IC + 0x3ff + (WAY) * 0x200)
+
+/* MAC group */
+#define SPR_MACLO (SPRGROUP_MAC + 1)
+#define SPR_MACHI (SPRGROUP_MAC + 2)
+
+/* Debug group */
+#define SPR_DVR(N) (SPRGROUP_D + (N))
+#define SPR_DCR(N) (SPRGROUP_D + 8 + (N))
+#define SPR_DMR1 (SPRGROUP_D + 16)
+#define SPR_DMR2 (SPRGROUP_D + 17)
+#define SPR_DWCR0 (SPRGROUP_D + 18)
+#define SPR_DWCR1 (SPRGROUP_D + 19)
+#define SPR_DSR (SPRGROUP_D + 20)
+#define SPR_DRR (SPRGROUP_D + 21)
+
+/* Performance counters group */
+#define SPR_PCCR(N) (SPRGROUP_PC + (N))
+#define SPR_PCMR(N) (SPRGROUP_PC + 8 + (N))
+
+/* Power management group */
+#define SPR_PMR (SPRGROUP_PM + 0)
+
+/* PIC group */
+#define SPR_PICMR (SPRGROUP_PIC + 0)
+#define SPR_PICPR (SPRGROUP_PIC + 1)
+#define SPR_PICSR (SPRGROUP_PIC + 2)
+
+/* Tick Timer group */
+#define SPR_TTMR (SPRGROUP_TT + 0)
+#define SPR_TTCR (SPRGROUP_TT + 1)
+
+/*
+ * Bit definitions for the Version Register
+ *
+ */
+#define SPR_VR_VER 0xff000000 /* Processor version */
+#define SPR_VR_CFG 0x00ff0000 /* Processor configuration */
+#define SPR_VR_RES 0x0000ffc0 /* Reserved */
+#define SPR_VR_REV 0x0000003f /* Processor revision */
+
+#define SPR_VR_VER_OFF 24
+#define SPR_VR_CFG_OFF 16
+#define SPR_VR_REV_OFF 0
+
+/*
+ * Bit definitions for the Unit Present Register
+ *
+ */
+#define SPR_UPR_UP 0x00000001 /* UPR present */
+#define SPR_UPR_DCP 0x00000002 /* Data cache present */
+#define SPR_UPR_ICP 0x00000004 /* Instruction cache present */
+#define SPR_UPR_DMP 0x00000008 /* Data MMU present */
+#define SPR_UPR_IMP 0x00000010 /* Instruction MMU present */
+#define SPR_UPR_MP 0x00000020 /* MAC present */
+#define SPR_UPR_DUP 0x00000040 /* Debug unit present */
+#define SPR_UPR_PCUP 0x00000080 /* Performance counters unit present */
+#define SPR_UPR_PMP 0x00000100 /* Power management present */
+#define SPR_UPR_PICP 0x00000200 /* PIC present */
+#define SPR_UPR_TTP 0x00000400 /* Tick timer present */
+#define SPR_UPR_RES 0x00fe0000 /* Reserved */
+#define SPR_UPR_CUP 0xff000000 /* Context units present */
+
+/*
+ * JPB: Bit definitions for the CPU configuration register
+ *
+ */
+#define SPR_CPUCFGR_NSGF 0x0000000f /* Number of shadow GPR files */
+#define SPR_CPUCFGR_CGF 0x00000010 /* Custom GPR file */
+#define SPR_CPUCFGR_OB32S 0x00000020 /* ORBIS32 supported */
+#define SPR_CPUCFGR_OB64S 0x00000040 /* ORBIS64 supported */
+#define SPR_CPUCFGR_OF32S 0x00000080 /* ORFPX32 supported */
+#define SPR_CPUCFGR_OF64S 0x00000100 /* ORFPX64 supported */
+#define SPR_CPUCFGR_OV64S 0x00000200 /* ORVDX64 supported */
+#define SPR_CPUCFGR_RES 0xfffffc00 /* Reserved */
+
+/*
+ * JPB: Bit definitions for the Debug configuration register and other
+ * constants.
+ *
+ */
+
+#define SPR_DCFGR_NDP 0x00000007 /* Number of matchpoints mask */
+#define SPR_DCFGR_NDP1 0x00000000 /* One matchpoint supported */
+#define SPR_DCFGR_NDP2 0x00000001 /* Two matchpoints supported */
+#define SPR_DCFGR_NDP3 0x00000002 /* Three matchpoints supported */
+#define SPR_DCFGR_NDP4 0x00000003 /* Four matchpoints supported */
+#define SPR_DCFGR_NDP5 0x00000004 /* Five matchpoints supported */
+#define SPR_DCFGR_NDP6 0x00000005 /* Six matchpoints supported */
+#define SPR_DCFGR_NDP7 0x00000006 /* Seven matchpoints supported */
+#define SPR_DCFGR_NDP8 0x00000007 /* Eight matchpoints supported */
+#define SPR_DCFGR_WPCI 0x00000008 /* Watchpoint counters implemented */
+
+#define MATCHPOINTS_TO_NDP(n) (1 == n ? SPR_DCFGR_NDP1 : \
+ 2 == n ? SPR_DCFGR_NDP2 : \
+ 3 == n ? SPR_DCFGR_NDP3 : \
+ 4 == n ? SPR_DCFGR_NDP4 : \
+ 5 == n ? SPR_DCFGR_NDP5 : \
+ 6 == n ? SPR_DCFGR_NDP6 : \
+ 7 == n ? SPR_DCFGR_NDP7 : SPR_DCFGR_NDP8)
+#define MAX_MATCHPOINTS 8
+#define MAX_WATCHPOINTS (MAX_MATCHPOINTS + 2)
+
+/*
+ * Bit definitions for the Supervision Register
+ *
+ */
+#define SPR_SR_SM 0x00000001 /* Supervisor Mode */
+#define SPR_SR_TEE 0x00000002 /* Tick timer Exception Enable */
+#define SPR_SR_IEE 0x00000004 /* Interrupt Exception Enable */
+#define SPR_SR_DCE 0x00000008 /* Data Cache Enable */
+#define SPR_SR_ICE 0x00000010 /* Instruction Cache Enable */
+#define SPR_SR_DME 0x00000020 /* Data MMU Enable */
+#define SPR_SR_IME 0x00000040 /* Instruction MMU Enable */
+#define SPR_SR_LEE 0x00000080 /* Little Endian Enable */
+#define SPR_SR_CE 0x00000100 /* CID Enable */
+#define SPR_SR_F 0x00000200 /* Condition Flag */
+#define SPR_SR_CY 0x00000400 /* Carry flag */
+#define SPR_SR_OV 0x00000800 /* Overflow flag */
+#define SPR_SR_OVE 0x00001000 /* Overflow flag Exception */
+#define SPR_SR_DSX 0x00002000 /* Delay Slot Exception */
+#define SPR_SR_EPH 0x00004000 /* Exception Prefix High */
+#define SPR_SR_FO 0x00008000 /* Fixed one */
+#define SPR_SR_SUMRA 0x00010000 /* Supervisor SPR read access */
+#define SPR_SR_RES 0x0ffe0000 /* Reserved */
+#define SPR_SR_CID 0xf0000000 /* Context ID */
+
+/*
+ * Bit definitions for the Data MMU Control Register
+ *
+ */
+#define SPR_DMMUCR_P2S 0x0000003e /* Level 2 Page Size */
+#define SPR_DMMUCR_P1S 0x000007c0 /* Level 1 Page Size */
+#define SPR_DMMUCR_VADDR_WIDTH 0x0000f800 /* Virtual ADDR Width */
+#define SPR_DMMUCR_PADDR_WIDTH 0x000f0000 /* Physical ADDR Width */
+
+/*
+ * Bit definitions for the Instruction MMU Control Register
+ *
+ */
+#define SPR_IMMUCR_P2S 0x0000003e /* Level 2 Page Size */
+#define SPR_IMMUCR_P1S 0x000007c0 /* Level 1 Page Size */
+#define SPR_IMMUCR_VADDR_WIDTH 0x0000f800 /* Virtual ADDR Width */
+#define SPR_IMMUCR_PADDR_WIDTH 0x000f0000 /* Physical ADDR Width */
+
+/*
+ * Bit definitions for the Data TLB Match Register
+ *
+ */
+#define SPR_DTLBMR_V 0x00000001 /* Valid */
+#define SPR_DTLBMR_PL1 0x00000002 /* Page Level 1 (if 0 then PL2) */
+#define SPR_DTLBMR_CID 0x0000003c /* Context ID */
+#define SPR_DTLBMR_LRU 0x000000c0 /* Least Recently Used */
+#define SPR_DTLBMR_VPN 0xfffff000 /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Data TLB Translate Register
+ *
+ */
+#define SPR_DTLBTR_CC 0x00000001 /* Cache Coherency */
+#define SPR_DTLBTR_CI 0x00000002 /* Cache Inhibit */
+#define SPR_DTLBTR_WBC 0x00000004 /* Write-Back Cache */
+#define SPR_DTLBTR_WOM 0x00000008 /* Weakly-Ordered Memory */
+#define SPR_DTLBTR_A 0x00000010 /* Accessed */
+#define SPR_DTLBTR_D 0x00000020 /* Dirty */
+#define SPR_DTLBTR_URE 0x00000040 /* User Read Enable */
+#define SPR_DTLBTR_UWE 0x00000080 /* User Write Enable */
+#define SPR_DTLBTR_SRE 0x00000100 /* Supervisor Read Enable */
+#define SPR_DTLBTR_SWE 0x00000200 /* Supervisor Write Enable */
+#define SPR_DTLBTR_PPN 0xfffff000 /* Physical Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Match Register
+ *
+ */
+#define SPR_ITLBMR_V 0x00000001 /* Valid */
+#define SPR_ITLBMR_PL1 0x00000002 /* Page Level 1 (if 0 then PL2) */
+#define SPR_ITLBMR_CID 0x0000003c /* Context ID */
+#define SPR_ITLBMR_LRU 0x000000c0 /* Least Recently Used */
+#define SPR_ITLBMR_VPN 0xfffff000 /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Translate Register
+ *
+ */
+#define SPR_ITLBTR_CC 0x00000001 /* Cache Coherency */
+#define SPR_ITLBTR_CI 0x00000002 /* Cache Inhibit */
+#define SPR_ITLBTR_WBC 0x00000004 /* Write-Back Cache */
+#define SPR_ITLBTR_WOM 0x00000008 /* Weakly-Ordered Memory */
+#define SPR_ITLBTR_A 0x00000010 /* Accessed */
+#define SPR_ITLBTR_D 0x00000020 /* Dirty */
+#define SPR_ITLBTR_SXE 0x00000040 /* Supervisor Execute Enable */
+#define SPR_ITLBTR_UXE 0x00000080 /* User Execute Enable */
+#define SPR_ITLBTR_PPN 0xfffff000 /* Physical Page Number */
+
+/*
+ * Bit definitions for Data Cache Control register
+ *
+ */
+#define SPR_DCCR_EW 0x000000ff /* Enable ways */
+
+/*
+ * Bit definitions for Insn Cache Control register
+ *
+ */
+#define SPR_ICCR_EW 0x000000ff /* Enable ways */
+
+/*
+ * Bit definitions for Data Cache Configuration Register
+ *
+ */
+
+#define SPR_DCCFGR_NCW 0x00000007
+#define SPR_DCCFGR_NCS 0x00000078
+#define SPR_DCCFGR_CBS 0x00000080
+#define SPR_DCCFGR_CWS 0x00000100
+#define SPR_DCCFGR_CCRI 0x00000200
+#define SPR_DCCFGR_CBIRI 0x00000400
+#define SPR_DCCFGR_CBPRI 0x00000800
+#define SPR_DCCFGR_CBLRI 0x00001000
+#define SPR_DCCFGR_CBFRI 0x00002000
+#define SPR_DCCFGR_CBWBRI 0x00004000
+
+#define SPR_DCCFGR_NCW_OFF 0
+#define SPR_DCCFGR_NCS_OFF 3
+#define SPR_DCCFGR_CBS_OFF 7
+
+/*
+ * Bit definitions for Instruction Cache Configuration Register
+ *
+ */
+#define SPR_ICCFGR_NCW 0x00000007
+#define SPR_ICCFGR_NCS 0x00000078
+#define SPR_ICCFGR_CBS 0x00000080
+#define SPR_ICCFGR_CCRI 0x00000200
+#define SPR_ICCFGR_CBIRI 0x00000400
+#define SPR_ICCFGR_CBPRI 0x00000800
+#define SPR_ICCFGR_CBLRI 0x00001000
+
+#define SPR_ICCFGR_NCW_OFF 0
+#define SPR_ICCFGR_NCS_OFF 3
+#define SPR_ICCFGR_CBS_OFF 7
+
+/*
+ * Bit definitions for Data MMU Configuration Register
+ *
+ */
+
+#define SPR_DMMUCFGR_NTW 0x00000003
+#define SPR_DMMUCFGR_NTS 0x0000001C
+#define SPR_DMMUCFGR_NAE 0x000000E0
+#define SPR_DMMUCFGR_CRI 0x00000100
+#define SPR_DMMUCFGR_PRI 0x00000200
+#define SPR_DMMUCFGR_TEIRI 0x00000400
+#define SPR_DMMUCFGR_HTR 0x00000800
+
+#define SPR_DMMUCFGR_NTW_OFF 0
+#define SPR_DMMUCFGR_NTS_OFF 2
+
+/*
+ * Bit definitions for Instruction MMU Configuration Register
+ *
+ */
+
+#define SPR_IMMUCFGR_NTW 0x00000003
+#define SPR_IMMUCFGR_NTS 0x0000001C
+#define SPR_IMMUCFGR_NAE 0x000000E0
+#define SPR_IMMUCFGR_CRI 0x00000100
+#define SPR_IMMUCFGR_PRI 0x00000200
+#define SPR_IMMUCFGR_TEIRI 0x00000400
+#define SPR_IMMUCFGR_HTR 0x00000800
+
+#define SPR_IMMUCFGR_NTW_OFF 0
+#define SPR_IMMUCFGR_NTS_OFF 2
+
+/*
+ * Bit definitions for Debug Control registers
+ *
+ */
+#define SPR_DCR_DP 0x00000001 /* DVR/DCR present */
+#define SPR_DCR_CC 0x0000000e /* Compare condition */
+#define SPR_DCR_SC 0x00000010 /* Signed compare */
+#define SPR_DCR_CT 0x000000e0 /* Compare to */
+
+/* Bit results with SPR_DCR_CC mask */
+#define SPR_DCR_CC_MASKED 0x00000000
+#define SPR_DCR_CC_EQUAL 0x00000002
+#define SPR_DCR_CC_LESS 0x00000004
+#define SPR_DCR_CC_LESSE 0x00000006
+#define SPR_DCR_CC_GREAT 0x00000008
+#define SPR_DCR_CC_GREATE 0x0000000a
+#define SPR_DCR_CC_NEQUAL 0x0000000c
+
+/* Bit results with SPR_DCR_CT mask */
+#define SPR_DCR_CT_DISABLED 0x00000000
+#define SPR_DCR_CT_IFEA 0x00000020
+#define SPR_DCR_CT_LEA 0x00000040
+#define SPR_DCR_CT_SEA 0x00000060
+#define SPR_DCR_CT_LD 0x00000080
+#define SPR_DCR_CT_SD 0x000000a0
+#define SPR_DCR_CT_LSEA 0x000000c0
+#define SPR_DCR_CT_LSD 0x000000e0
+/* SPR_DCR_CT_LSD doesn't seem to be implemented anywhere in or1ksim. 2004-1-30 HP */
+
+/*
+ * Bit definitions for Debug Mode 1 register
+ *
+ */
+#define SPR_DMR1_CW 0x000fffff /* Chain register pair data */
+#define SPR_DMR1_CW0_AND 0x00000001
+#define SPR_DMR1_CW0_OR 0x00000002
+#define SPR_DMR1_CW0 (SPR_DMR1_CW0_AND | SPR_DMR1_CW0_OR)
+#define SPR_DMR1_CW1_AND 0x00000004
+#define SPR_DMR1_CW1_OR 0x00000008
+#define SPR_DMR1_CW1 (SPR_DMR1_CW1_AND | SPR_DMR1_CW1_OR)
+#define SPR_DMR1_CW2_AND 0x00000010
+#define SPR_DMR1_CW2_OR 0x00000020
+#define SPR_DMR1_CW2 (SPR_DMR1_CW2_AND | SPR_DMR1_CW2_OR)
+#define SPR_DMR1_CW3_AND 0x00000040
+#define SPR_DMR1_CW3_OR 0x00000080
+#define SPR_DMR1_CW3 (SPR_DMR1_CW3_AND | SPR_DMR1_CW3_OR)
+#define SPR_DMR1_CW4_AND 0x00000100
+#define SPR_DMR1_CW4_OR 0x00000200
+#define SPR_DMR1_CW4 (SPR_DMR1_CW4_AND | SPR_DMR1_CW4_OR)
+#define SPR_DMR1_CW5_AND 0x00000400
+#define SPR_DMR1_CW5_OR 0x00000800
+#define SPR_DMR1_CW5 (SPR_DMR1_CW5_AND | SPR_DMR1_CW5_OR)
+#define SPR_DMR1_CW6_AND 0x00001000
+#define SPR_DMR1_CW6_OR 0x00002000
+#define SPR_DMR1_CW6 (SPR_DMR1_CW6_AND | SPR_DMR1_CW6_OR)
+#define SPR_DMR1_CW7_AND 0x00004000
+#define SPR_DMR1_CW7_OR 0x00008000
+#define SPR_DMR1_CW7 (SPR_DMR1_CW7_AND | SPR_DMR1_CW7_OR)
+#define SPR_DMR1_CW8_AND 0x00010000
+#define SPR_DMR1_CW8_OR 0x00020000
+#define SPR_DMR1_CW8 (SPR_DMR1_CW8_AND | SPR_DMR1_CW8_OR)
+#define SPR_DMR1_CW9_AND 0x00040000
+#define SPR_DMR1_CW9_OR 0x00080000
+#define SPR_DMR1_CW9 (SPR_DMR1_CW9_AND | SPR_DMR1_CW9_OR)
+#define SPR_DMR1_RES1 0x00300000 /* Reserved */
+#define SPR_DMR1_ST 0x00400000 /* Single-step trace*/
+#define SPR_DMR1_BT 0x00800000 /* Branch trace */
+#define SPR_DMR1_RES2 0xff000000 /* Reserved */
+
+/*
+ * Bit definitions for Debug Mode 2 register. AWTC and WGB corrected by JPB
+ *
+ */
+#define SPR_DMR2_WCE0 0x00000001 /* Watchpoint counter 0 enable */
+#define SPR_DMR2_WCE1 0x00000002 /* Watchpoint counter 1 enable */
+#define SPR_DMR2_AWTC 0x00000ffc /* Assign watchpoints to counters */
+#define SPR_DMR2_AWTC_OFF 2 /* Bit offset to AWTC field */
+#define SPR_DMR2_WGB 0x003ff000 /* Watchpoints generating breakpoint */
+#define SPR_DMR2_WGB_OFF 12 /* Bit offset to WGB field */
+#define SPR_DMR2_WBS 0xffc00000 /* JPB: Watchpoint status */
+#define SPR_DMR2_WBS_OFF 22 /* Bit offset to WBS field */
+
+/*
+ * Bit definitions for Debug watchpoint counter registers
+ *
+ */
+#define SPR_DWCR_COUNT 0x0000ffff /* Count */
+#define SPR_DWCR_MATCH 0xffff0000 /* Match */
+#define SPR_DWCR_MATCH_OFF 16 /* Match bit offset */
+
+/*
+ * Bit definitions for Debug stop register
+ *
+ */
+#define SPR_DSR_RSTE 0x00000001 /* Reset exception */
+#define SPR_DSR_BUSEE 0x00000002 /* Bus error exception */
+#define SPR_DSR_DPFE 0x00000004 /* Data Page Fault exception */
+#define SPR_DSR_IPFE 0x00000008 /* Insn Page Fault exception */
+#define SPR_DSR_TTE 0x00000010 /* Tick Timer exception */
+#define SPR_DSR_AE 0x00000020 /* Alignment exception */
+#define SPR_DSR_IIE 0x00000040 /* Illegal Instruction exception */
+#define SPR_DSR_IE 0x00000080 /* Interrupt exception */
+#define SPR_DSR_DME 0x00000100 /* DTLB miss exception */
+#define SPR_DSR_IME 0x00000200 /* ITLB miss exception */
+#define SPR_DSR_RE 0x00000400 /* Range exception */
+#define SPR_DSR_SCE 0x00000800 /* System call exception */
+#define SPR_DSR_FPE 0x00001000 /* Floating Point Exception */
+#define SPR_DSR_TE 0x00002000 /* Trap exception */
+
+/*
+ * Bit definitions for Debug reason register
+ *
+ */
+#define SPR_DRR_RSTE 0x00000001 /* Reset exception */
+#define SPR_DRR_BUSEE 0x00000002 /* Bus error exception */
+#define SPR_DRR_DPFE 0x00000004 /* Data Page Fault exception */
+#define SPR_DRR_IPFE 0x00000008 /* Insn Page Fault exception */
+#define SPR_DRR_TTE 0x00000010 /* Tick Timer exception */
+#define SPR_DRR_AE 0x00000020 /* Alignment exception */
+#define SPR_DRR_IIE 0x00000040 /* Illegal Instruction exception */
+#define SPR_DRR_IE 0x00000080 /* Interrupt exception */
+#define SPR_DRR_DME 0x00000100 /* DTLB miss exception */
+#define SPR_DRR_IME 0x00000200 /* ITLB miss exception */
+#define SPR_DRR_RE 0x00000400 /* Range exception */
+#define SPR_DRR_SCE 0x00000800 /* System call exception */
+#define SPR_DRR_FPE 0x00001000 /* Floating Point Exception */
+#define SPR_DRR_TE 0x00002000 /* Trap exception */
+
+/*
+ * Bit definitions for Performance counters mode registers
+ *
+ */
+#define SPR_PCMR_CP 0x00000001 /* Counter present */
+#define SPR_PCMR_UMRA 0x00000002 /* User mode read access */
+#define SPR_PCMR_CISM 0x00000004 /* Count in supervisor mode */
+#define SPR_PCMR_CIUM 0x00000008 /* Count in user mode */
+#define SPR_PCMR_LA 0x00000010 /* Load access event */
+#define SPR_PCMR_SA 0x00000020 /* Store access event */
+#define SPR_PCMR_IF 0x00000040 /* Instruction fetch event*/
+#define SPR_PCMR_DCM 0x00000080 /* Data cache miss event */
+#define SPR_PCMR_ICM 0x00000100 /* Insn cache miss event */
+#define SPR_PCMR_IFS 0x00000200 /* Insn fetch stall event */
+#define SPR_PCMR_LSUS 0x00000400 /* LSU stall event */
+#define SPR_PCMR_BS 0x00000800 /* Branch stall event */
+#define SPR_PCMR_DTLBM 0x00001000 /* DTLB miss event */
+#define SPR_PCMR_ITLBM 0x00002000 /* ITLB miss event */
+#define SPR_PCMR_DDS 0x00004000 /* Data dependency stall event */
+#define SPR_PCMR_WPE 0x03ff8000 /* Watchpoint events */
+
+/*
+ * Bit definitions for the Power management register
+ *
+ */
+#define SPR_PMR_SDF 0x0000000f /* Slow down factor */
+#define SPR_PMR_DME 0x00000010 /* Doze mode enable */
+#define SPR_PMR_SME 0x00000020 /* Sleep mode enable */
+#define SPR_PMR_DCGE 0x00000040 /* Dynamic clock gating enable */
+#define SPR_PMR_SUME 0x00000080 /* Suspend mode enable */
+
+/*
+ * Bit definitions for PICMR
+ *
+ */
+#define SPR_PICMR_IUM 0xfffffffc /* Interrupt unmask */
+
+/*
+ * Bit definitions for PICPR
+ *
+ */
+#define SPR_PICPR_IPRIO 0xfffffffc /* Interrupt priority */
+
+/*
+ * Bit definitions for PICSR
+ *
+ */
+#define SPR_PICSR_IS 0xffffffff /* Interrupt status */
+
+/*
+ * Bit definitions for Tick Timer Control Register
+ *
+ */
+
+#define SPR_TTCR_CNT 0xffffffff /* Count, time period */
+#define SPR_TTMR_TP 0x0fffffff /* Time period */
+#define SPR_TTMR_IP 0x10000000 /* Interrupt Pending */
+#define SPR_TTMR_IE 0x20000000 /* Interrupt Enable */
+#define SPR_TTMR_DI 0x00000000 /* Disabled */
+#define SPR_TTMR_RT 0x40000000 /* Restart tick */
+#define SPR_TTMR_SR 0x80000000 /* Single run */
+#define SPR_TTMR_CR 0xc0000000 /* Continuous run */
+#define SPR_TTMR_M 0xc0000000 /* Tick mode */
+
+/*
+ * Bit definitions for the FP Control Status Register
+ *
+ */
+#define SPR_FPCSR_FPEE 0x00000001 /* Floating Point Exception Enable */
+#define SPR_FPCSR_RM 0x00000006 /* Rounding Mode */
+#define SPR_FPCSR_OVF 0x00000008 /* Overflow Flag */
+#define SPR_FPCSR_UNF 0x00000010 /* Underflow Flag */
+#define SPR_FPCSR_SNF 0x00000020 /* SNAN Flag */
+#define SPR_FPCSR_QNF 0x00000040 /* QNAN Flag */
+#define SPR_FPCSR_ZF 0x00000080 /* Zero Flag */
+#define SPR_FPCSR_IXF 0x00000100 /* Inexact Flag */
+#define SPR_FPCSR_IVF 0x00000200 /* Invalid Flag */
+#define SPR_FPCSR_INF 0x00000400 /* Infinity Flag */
+#define SPR_FPCSR_DZF 0x00000800 /* Divide By Zero Flag */
+#define SPR_FPCSR_ALLF (SPR_FPCSR_OVF | SPR_FPCSR_UNF | SPR_FPCSR_SNF | \
+ SPR_FPCSR_QNF | SPR_FPCSR_ZF | SPR_FPCSR_IXF | \
+ SPR_FPCSR_IVF | SPR_FPCSR_INF | SPR_FPCSR_DZF)
+
+#define FPCSR_RM_RN (0<<1)
+#define FPCSR_RM_RZ (1<<1)
+#define FPCSR_RM_RIP (2<<1)
+#define FPCSR_RM_RIN (3<<1)
+
+/*
+ * l.nop constants
+ *
+ */
+#define NOP_NOP 0x0000 /* Normal nop instruction */
+#define NOP_EXIT 0x0001 /* End of simulation */
+#define NOP_REPORT 0x0002 /* Simple report */
+/*#define NOP_PRINTF 0x0003 Simprintf instruction (obsolete)*/
+#define NOP_PUTC 0x0004 /* JPB: Simputc instruction */
+#define NOP_CNT_RESET 0x0005 /* Reset statistics counters */
+#define NOP_GET_TICKS 0x0006 /* JPB: Get # ticks running */
+#define NOP_GET_PS 0x0007 /* JPB: Get picosecs/cycle */
+#define NOP_REPORT_FIRST 0x0400 /* Report with number */
+#define NOP_REPORT_LAST 0x03ff /* Report with number */
+
+#endif /* SPR_DEFS__H */
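A rough sketch of how the version-register masks and offsets above pair up
(illustrative only, not part of the patch; mfspr() comes from <asm/spr.h>):

static inline void example_read_version(unsigned int *ver, unsigned int *rev)
{
	unsigned long vr = mfspr(SPR_VR);

	*ver = (vr & SPR_VR_VER) >> SPR_VR_VER_OFF;	/* processor version */
	*rev = (vr & SPR_VR_REV) >> SPR_VR_REV_OFF;	/* processor revision */
}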
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h
new file mode 100644
index 000000000000..9f0337055d26
--- /dev/null
+++ b/arch/openrisc/include/asm/syscall.h
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALL_H__
+#define __ASM_OPENRISC_SYSCALL_H__
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+static inline int
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->syscallno ? regs->syscallno : -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ regs->gpr[11] = regs->orig_gpr11;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ return IS_ERR_VALUE(regs->gpr[11]) ? regs->gpr[11] : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->gpr[11];
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ if (error)
+ regs->gpr[11] = -error;
+ else
+ regs->gpr[11] = val;
+}
+
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+
+ memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+}
+
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+
+ memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+}
+
+#endif
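A rough sketch of how a tracer would use the accessors above (illustrative
only, not part of the patch): on syscall entry the number and the six argument
slots held in gpr3-gpr8 can be sampled without touching the stack layout
directly.

static inline long example_syscall_entry(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned long args[6])
{
	syscall_get_arguments(task, regs, 0, 6, args);
	return syscall_get_nr(task, regs);
}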
diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h
new file mode 100644
index 000000000000..84a978af44d7
--- /dev/null
+++ b/arch/openrisc/include/asm/syscalls.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALLS_H
+#define __ASM_OPENRISC_SYSCALLS_H
+
+asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1,
+ unsigned long *v2);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __ASM_OPENRISC_SYSCALLS_H */
diff --git a/arch/openrisc/include/asm/system.h b/arch/openrisc/include/asm/system.h
new file mode 100644
index 000000000000..cf658882186b
--- /dev/null
+++ b/arch/openrisc/include/asm/system.h
@@ -0,0 +1,35 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSTEM_H
+#define __ASM_OPENRISC_SYSTEM_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <asm/spr.h>
+#include <asm-generic/system.h>
+
+/* We probably need this definition, but the generic system.h provides it
+ * and it's not used on our arch anyway...
+ */
+/*#define nop() __asm__ __volatile__ ("l.nop"::)*/
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_OPENRISC_SYSTEM_H */
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
new file mode 100644
index 000000000000..07a8bc080ef2
--- /dev/null
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -0,0 +1,134 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/processor.h>
+#endif
+
+
+/* THREAD_SIZE is the size of the thread_info/kernel-stack combo.
+ * Normally, the stack is found by doing something like p + THREAD_SIZE;
+ * on or32, a page is 8192 bytes, which seems like a sane size.
+ */
+
+#define THREAD_SIZE_ORDER 0
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ __u32 cpu; /* current CPU */
+ __s32 preempt_count; /* 0 => preemptable, <0 => BUG */
+
+ mm_segment_t addr_limit; /* thread address space:
+ 0-0x7FFFFFFF for user-thread
+ 0-0xFFFFFFFF for kernel-thread
+ */
+ struct restart_block restart_block;
+ __u8 supervisor_stack[0];
+
+ /* saved context data */
+ unsigned long ksp;
+};
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .ksp = 0, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+
+/* how to get the thread information struct from C */
+register struct thread_info *current_thread_info_reg asm("r10");
+#define current_thread_info() (current_thread_info_reg)
+
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SINGLESTEP 4 /* restore singlestep on return to user
+ * mode
+ */
+#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
+#define TIF_RESTORE_SIGMASK 9
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_MEMDIE 17
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+
+
+/* Work to do when returning from interrupt/exception */
+/* For OpenRISC, this is anything in the LSW other than syscall trace */
+#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP))
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
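A rough sketch of how the flags above are consumed (illustrative only, not
part of the patch): the return-to-user path checks the LSW work bits of the
current thread_info, reached through r10 as described above.

static inline unsigned long example_work_pending(void)
{
	struct thread_info *ti = current_thread_info();

	return ti->flags & _TIF_WORK_MASK;	/* e.g. _TIF_SIGPENDING is set */
}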
diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h
new file mode 100644
index 000000000000..9935cad1b9b9
--- /dev/null
+++ b/arch/openrisc/include/asm/timex.h
@@ -0,0 +1,36 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TIMEX_H
+#define __ASM_OPENRISC_TIMEX_H
+
+#define get_cycles get_cycles
+
+#include <asm-generic/timex.h>
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
+static inline cycles_t get_cycles(void)
+{
+ return mfspr(SPR_TTCR);
+}
+
+/* This isn't really used any more */
+#define CLOCK_TICK_RATE 1000
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+#endif
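A rough usage sketch (illustrative only, not part of the patch): get_cycles()
reads the free-running tick-timer counter, so elapsed cycles are just a
subtraction of two samples.

static inline cycles_t example_elapsed_cycles(cycles_t start)
{
	return get_cycles() - start;
}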
diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h
new file mode 100644
index 000000000000..fa4376a4515d
--- /dev/null
+++ b/arch/openrisc/include/asm/tlb.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TLB_H__
+#define __ASM_OPENRISC_TLB_H__
+
+/*
+ * or32 doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#endif /* __ASM_OPENRISC_TLB_H__ */
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
new file mode 100644
index 000000000000..6a2accd6cb67
--- /dev/null
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -0,0 +1,55 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TLBFLUSH_H
+#define __ASM_OPENRISC_TLBFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+
+/*
+ * - flush_tlb() flushes the current mm struct's TLB entries
+ * - flush_tlb_all() flushes all processes' TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ */
+
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
+
+static inline void flush_tlb(void)
+{
+ flush_tlb_mm(current->mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_range(NULL, start, end);
+}
+
+#endif /* __ASM_OPENRISC_TLBFLUSH_H */
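A rough usage sketch (illustrative only, not part of the patch): after the
page tables covering part of a VMA have been changed, the stale entries are
dropped with flush_tlb_range() as listed above.

static inline void example_after_pte_update(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long end)
{
	flush_tlb_range(vma, start, end);
}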
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
new file mode 100644
index 000000000000..c310e45b538e
--- /dev/null
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -0,0 +1,355 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_UACCESS_H
+#define __ASM_OPENRISC_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/thread_info.h>
+#include <linux/prefetch.h>
+#include <linux/string.h>
+#include <linux/thread_info.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, it is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+/* addr_limit is the maximum accessible address for the task. we misuse
+ * the KERNEL_DS and USER_DS values to both assign and compare the
+ * addr_limit values through the equally misnamed get/set_fs macros.
+ * (see above)
+ */
+
+#define KERNEL_DS (~0UL)
+#define get_ds() (KERNEL_DS)
+
+#define USER_DS (TASK_SIZE)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b) ((a) == (b))
+
+/* Ensure that the range from addr to addr+size is all within the process'
+ * address space
+ */
+#define __range_ok(addr, size) ((size) <= get_fs() && (addr) <= (get_fs()-(size)))
+
+/* Ensure that addr is below task's addr_limit */
+#define __addr_ok(addr) ((unsigned long) addr < get_fs())
+
+#define access_ok(type, addr, size) \
+ __range_ok((unsigned long)addr, (unsigned long)size)
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
+extern void sort_exception_table(void);
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * OpenRISC, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, "l.sb"); break; \
+ case 2: __put_user_asm(x, ptr, retval, "l.sh"); break; \
+ case 4: __put_user_asm(x, ptr, retval, "l.sw"); break; \
+ case 8: __put_user_asm2(x, ptr, retval); break; \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+struct __large_struct {
+ unsigned long buf[100];
+};
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" 0(%2),%1\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: l.addi %0,r0,%3\n" \
+ " l.j 2b\n" \
+ " l.nop\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,3b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __put_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: l.sw 0(%2),%1\n" \
+ "2: l.sw 4(%2),%H1\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: l.addi %0,r0,%3\n" \
+ " l.j 3b\n" \
+ " l.nop\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,4b\n" \
+ " .long 2b,4b\n" \
+ ".previous" \
+ : "=r"(err) \
+ : "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err, __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT, __gu_val = 0; \
+ const __typeof__(*(ptr)) * __gu_addr = (ptr); \
+ if (access_ok(VERIFY_READ, __gu_addr, size)) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
+ case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
+ case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
+ case 8: __get_user_asm2(x, ptr, retval); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" %1,0(%2)\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: l.addi %0,r0,%3\n" \
+ " l.addi %1,r0,0\n" \
+ " l.j 2b\n" \
+ " l.nop\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,3b\n" \
+ ".previous" \
+ : "=r"(err), "=r"(x) \
+ : "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __get_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: l.lwz %1,0(%2)\n" \
+ "2: l.lwz %H1,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: l.addi %0,r0,%3\n" \
+ " l.addi %1,r0,0\n" \
+ " l.addi %H1,r0,0\n" \
+ " l.j 3b\n" \
+ " l.nop\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 2\n" \
+ " .long 1b,4b\n" \
+ " .long 2b,4b\n" \
+ ".previous" \
+ : "=r"(err), "=&r"(x) \
+ : "r"(addr), "i"(-EFAULT), "0"(err))
+
+/* more complex routines */
+
+extern unsigned long __must_check
+__copy_tofrom_user(void *to, const void *from, unsigned long size);
+
+#define __copy_from_user(to, from, size) \
+ __copy_tofrom_user(to, from, size)
+#define __copy_to_user(to, from, size) \
+ __copy_tofrom_user(to, from, size)
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+static inline unsigned long
+copy_from_user(void *to, const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_tofrom_user(to, from, n);
+ if ((unsigned long)from < TASK_SIZE) {
+ over = (unsigned long)from + n - TASK_SIZE;
+ return __copy_tofrom_user(to, from, n - over) + over;
+ }
+ return n;
+}
+
+static inline unsigned long
+copy_to_user(void *to, const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_tofrom_user(to, from, n);
+ if ((unsigned long)to < TASK_SIZE) {
+ over = (unsigned long)to + n - TASK_SIZE;
+ return __copy_tofrom_user(to, from, n - over) + over;
+ }
+ return n;
+}
+
+extern unsigned long __clear_user(void *addr, unsigned long size);
+
+static inline __must_check unsigned long
+clear_user(void *addr, unsigned long size)
+{
+
+ if (access_ok(VERIFY_WRITE, addr, size))
+ return __clear_user(addr, size);
+ if ((unsigned long)addr < TASK_SIZE) {
+ unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+ return __clear_user(addr, size - over) + over;
+ }
+ return size;
+}
+
+extern int __strncpy_from_user(char *dst, const char *src, long count);
+
+static inline long strncpy_from_user(char *dst, const char *src, long count)
+{
+ if (access_ok(VERIFY_READ, src, 1))
+ return __strncpy_from_user(dst, src, count);
+ return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+
+extern int __strnlen_user(const char *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or something > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+static inline long strnlen_user(const char __user *str, long len)
+{
+ unsigned long top = (unsigned long)get_fs();
+ unsigned long res = 0;
+
+ if (__addr_ok(str))
+ res = __strnlen_user(str, len, top);
+
+ return res;
+}
+
+#define strlen_user(str) strnlen_user(str, TASK_SIZE-1)
+
+#endif /* __ASM_OPENRISC_UACCESS_H */
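
For orientation, the accessors above are consumed like any other architecture's uaccess layer. A minimal kernel-side sketch (the struct and function names are hypothetical, not part of this patch) showing where get_user() and copy_from_user() end up:

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_args {			/* hypothetical ioctl-style argument */
	unsigned long addr;
	unsigned long len;
};

static long demo_copy_args(struct demo_args __user *uargs)
{
	struct demo_args args;
	unsigned long len;

	/* Bulk transfer: copy_from_user() performs the access_ok() check
	 * itself and falls back to a partial copy near the addr_limit. */
	if (copy_from_user(&args, uargs, sizeof(args)))
		return -EFAULT;

	/* Single value: get_user() expands to __get_user_check(), which
	 * selects the l.lwz variant via __get_user_size(). */
	if (get_user(len, &uargs->len))
		return -EFAULT;

	return (long)len;
}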
diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h
new file mode 100644
index 000000000000..1141cbd6fd72
--- /dev/null
+++ b/arch/openrisc/include/asm/unaligned.h
@@ -0,0 +1,51 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_UNALIGNED_H
+#define __ASM_OPENRISC_UNALIGNED_H
+
+/*
+ * This is copied from the generic implementation and the C-struct
+ * variant replaced with the memmove variant. The GCC compiler
+ * for the OR32 arch optimizes too aggressively for the C-struct
+ * variant to work, so use the memmove variant instead.
+ *
+ * It may be worth considering implementing the unaligned access
+ * exception handler and allowing unaligned accesses (access_ok.h)...
+ * not sure if it would be much of a performance win without further
+ * investigation.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianness
+#endif
+
+#endif /* __ASM_OPENRISC_UNALIGNED_H */
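
A short usage sketch for the helpers selected above (the function name is hypothetical, not part of the patch); with the big-endian configuration that OpenRISC uses, get_unaligned_be32() resolves to the memmove-based variant rather than a plain word load:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Read a 32-bit big-endian length field that sits at an odd offset
 * inside a byte buffer; safe regardless of the buffer's alignment. */
static u32 demo_frame_len(const u8 *buf)
{
	return get_unaligned_be32(buf + 2);
}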
diff --git a/arch/openrisc/include/asm/unistd.h b/arch/openrisc/include/asm/unistd.h
new file mode 100644
index 000000000000..89af3ab5c2e9
--- /dev/null
+++ b/arch/openrisc/include/asm/unistd.h
@@ -0,0 +1,31 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#if !defined(__ASM_OPENRISC_UNISTD_H) || defined(__SYSCALL)
+#define __ASM_OPENRISC_UNISTD_H
+
+#define __ARCH_HAVE_MMU
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define __NR_or1k_atomic __NR_arch_specific_syscall
+__SYSCALL(__NR_or1k_atomic, sys_or1k_atomic)
+
+#endif /* __ASM_OPENRISC_UNISTD_H */
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
new file mode 100644
index 000000000000..9a4c2706d795
--- /dev/null
+++ b/arch/openrisc/kernel/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the linux kernel.
+#
+
+extra-y := head.o vmlinux.lds init_task.o
+
+obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \
+ traps.o time.o irq.o entry.o ptrace.o signal.o sys_or32.o \
+ sys_call_table.o
+
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_OF) += prom.o
+
+clean:
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..1a242a0d7583
--- /dev/null
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -0,0 +1,70 @@
+/*
+ * OpenRISC asm-offsets.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/thread_info.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+ /* offsets into the task_struct */
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+
+ /* offsets into thread_info */
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_KSP, offsetof(struct thread_info, ksp));
+
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+
+ /* Interrupt register frame */
+ DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
+ DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+
+ DEFINE(NUM_USER_SEGMENTS, TASK_SIZE >> 28);
+ return 0;
+}
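
The DEFINE() trick above works because the "i" constraint forces the constant into the generated assembly, where the build system's sed pass can pick it up. A stand-alone sketch of the same technique (toy struct and names, outside the kernel build):

/* Compile with 'gcc -S demo-offsets.c'; the generated demo-offsets.s
 * then contains a line of the form
 *     ->DEMO_B_OFF $4 offsetof(struct demo, b)
 * (immediate syntax varies by target; 4 assumes a 32-bit layout), which
 * a sed script rewrites into '#define DEMO_B_OFF 4' for inclusion from
 * assembly sources.
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo {
	int  a;
	long b;
};

int main(void)
{
	DEFINE(DEMO_B_OFF, offsetof(struct demo, b));
	return 0;
}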
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
new file mode 100644
index 000000000000..968d3ee477e3
--- /dev/null
+++ b/arch/openrisc/kernel/dma.c
@@ -0,0 +1,191 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * DMA mapping callbacks...
+ * As alloc_coherent is the only DMA callback being used currently, that's
+ * the only thing implemented properly. The rest need looking into...
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+
+#include <asm/cpuinfo.h>
+#include <asm/spr_defs.h>
+#include <asm/tlbflush.h>
+
+static int page_set_nocache(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ unsigned long cl;
+
+ pte_val(*pte) |= _PAGE_CI;
+
+ /*
+ * Flush the page out of the TLB so that the new page flags get
+ * picked up next time there's an access
+ */
+ flush_tlb_page(NULL, addr);
+
+ /* Flush page out of dcache */
+ for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBFR, cl);
+
+ return 0;
+}
+
+static int page_clear_nocache(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_val(*pte) &= ~_PAGE_CI;
+
+ /*
+ * Flush the page out of the TLB so that the new page flags get
+ * picked up next time there's an access
+ */
+ flush_tlb_page(NULL, addr);
+
+ return 0;
+}
+
+/*
+ * Alloc "coherent" memory, which for OpenRISC means simply uncached.
+ *
+ * This function effectively just calls __get_free_pages, sets the
+ * cache-inhibit bit on those pages, and makes sure that the pages are
+ * flushed out of the cache before they are used.
+ *
+ */
+void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ unsigned long va;
+ void *page;
+ struct mm_walk walk = {
+ .pte_entry = page_set_nocache,
+ .mm = &init_mm
+ };
+
+ page = alloc_pages_exact(size, gfp);
+ if (!page)
+ return NULL;
+
+ /* This gives us the real physical address of the first page. */
+ *dma_handle = __pa(page);
+
+ va = (unsigned long)page;
+
+ /*
+ * We need to iterate through the pages, clearing the dcache for
+ * them and setting the cache-inhibit bit.
+ */
+ if (walk_page_range(va, va + size, &walk)) {
+ free_pages_exact(page, size);
+ return NULL;
+ }
+
+ return (void *)va;
+}
+
+void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ unsigned long va = (unsigned long)vaddr;
+ struct mm_walk walk = {
+ .pte_entry = page_clear_nocache,
+ .mm = &init_mm
+ };
+
+ /* walk_page_range shouldn't be able to fail here */
+ WARN_ON(walk_page_range(va, va + size, &walk));
+
+ free_pages_exact(vaddr, size);
+}
+
+dma_addr_t or1k_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ unsigned long cl;
+ dma_addr_t addr = page_to_phys(page) + offset;
+
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ /* Flush the dcache for the requested range */
+ for (cl = addr; cl < addr + size;
+ cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBFR, cl);
+ break;
+ case DMA_FROM_DEVICE:
+ /* Invalidate the dcache for the requested range */
+ for (cl = addr; cl < addr + size;
+ cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBIR, cl);
+ break;
+ default:
+ /*
+ * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
+ * flush nor invalidate the cache here as the area will need
+ * to be manually synced anyway.
+ */
+ break;
+ }
+
+ return addr;
+}
+
+void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ /* Nothing special to do here... */
+}
+
+void or1k_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long cl;
+ dma_addr_t addr = dma_handle;
+
+ /* Invalidate the dcache for the requested range */
+ for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBIR, cl);
+}
+
+void or1k_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long cl;
+ dma_addr_t addr = dma_handle;
+
+ /* Flush the dcache for the requested range */
+ for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+ mtspr(SPR_DCBFR, cl);
+}
+
+/* Number of entries preallocated for DMA-API debugging */
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+ return 0;
+}
+
+fs_initcall(dma_init);
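
Assuming the usual dma-mapping glue in asm/dma-mapping.h (not shown in this hunk) routes the generic DMA API to the or1k_* callbacks above, a driver-side sketch would look roughly like this (device and function names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int demo_dma_setup(struct device *dev, struct page *pg)
{
	dma_addr_t ring_handle, buf_handle;
	void *ring;

	/* Coherent (uncached) allocation: ends up in or1k_dma_alloc_coherent(),
	 * which sets _PAGE_CI on the pages and flushes them from the dcache. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* Streaming mapping for a device-bound buffer: ends up in
	 * or1k_map_page(), which flushes the dcache for DMA_TO_DEVICE. */
	buf_handle = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_TO_DEVICE);

	dma_unmap_page(dev, buf_handle, PAGE_SIZE, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_handle);
	return 0;
}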
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
new file mode 100644
index 000000000000..d5f9c35a583f
--- /dev/null
+++ b/arch/openrisc/kernel/entry.S
@@ -0,0 +1,1128 @@
+/*
+ * OpenRISC entry.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/processor.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/spr_defs.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/asm-offsets.h>
+
+#define DISABLE_INTERRUPTS(t1,t2) \
+ l.mfspr t2,r0,SPR_SR ;\
+ l.movhi t1,hi(~(SPR_SR_IEE|SPR_SR_TEE)) ;\
+ l.ori t1,t1,lo(~(SPR_SR_IEE|SPR_SR_TEE)) ;\
+ l.and t2,t2,t1 ;\
+ l.mtspr r0,t2,SPR_SR
+
+#define ENABLE_INTERRUPTS(t1) \
+ l.mfspr t1,r0,SPR_SR ;\
+ l.ori t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE) ;\
+ l.mtspr r0,t1,SPR_SR
+
+/* =========================================================[ macros ]=== */
+
+/*
+ * We need to disable interrupts at beginning of RESTORE_ALL
+ * since interrupt might come in after we've loaded EPC return address
+ * and overwrite EPC with address somewhere in RESTORE_ALL
+ * which is of course wrong!
+ */
+
+#define RESTORE_ALL \
+ DISABLE_INTERRUPTS(r3,r4) ;\
+ l.lwz r3,PT_PC(r1) ;\
+ l.mtspr r0,r3,SPR_EPCR_BASE ;\
+ l.lwz r3,PT_SR(r1) ;\
+ l.mtspr r0,r3,SPR_ESR_BASE ;\
+ l.lwz r2,PT_GPR2(r1) ;\
+ l.lwz r3,PT_GPR3(r1) ;\
+ l.lwz r4,PT_GPR4(r1) ;\
+ l.lwz r5,PT_GPR5(r1) ;\
+ l.lwz r6,PT_GPR6(r1) ;\
+ l.lwz r7,PT_GPR7(r1) ;\
+ l.lwz r8,PT_GPR8(r1) ;\
+ l.lwz r9,PT_GPR9(r1) ;\
+ l.lwz r10,PT_GPR10(r1) ;\
+ l.lwz r11,PT_GPR11(r1) ;\
+ l.lwz r12,PT_GPR12(r1) ;\
+ l.lwz r13,PT_GPR13(r1) ;\
+ l.lwz r14,PT_GPR14(r1) ;\
+ l.lwz r15,PT_GPR15(r1) ;\
+ l.lwz r16,PT_GPR16(r1) ;\
+ l.lwz r17,PT_GPR17(r1) ;\
+ l.lwz r18,PT_GPR18(r1) ;\
+ l.lwz r19,PT_GPR19(r1) ;\
+ l.lwz r20,PT_GPR20(r1) ;\
+ l.lwz r21,PT_GPR21(r1) ;\
+ l.lwz r22,PT_GPR22(r1) ;\
+ l.lwz r23,PT_GPR23(r1) ;\
+ l.lwz r24,PT_GPR24(r1) ;\
+ l.lwz r25,PT_GPR25(r1) ;\
+ l.lwz r26,PT_GPR26(r1) ;\
+ l.lwz r27,PT_GPR27(r1) ;\
+ l.lwz r28,PT_GPR28(r1) ;\
+ l.lwz r29,PT_GPR29(r1) ;\
+ l.lwz r30,PT_GPR30(r1) ;\
+ l.lwz r31,PT_GPR31(r1) ;\
+ l.lwz r1,PT_SP(r1) ;\
+ l.rfe
+
+
+#define EXCEPTION_ENTRY(handler) \
+ .global handler ;\
+handler: ;\
+ /* r1, EPCR, ESR are already saved */ ;\
+ l.sw PT_GPR2(r1),r2 ;\
+ l.sw PT_GPR3(r1),r3 ;\
+ l.sw PT_ORIG_GPR11(r1),r11 ;\
+ /* r4 already saved */ ;\
+ l.sw PT_GPR5(r1),r5 ;\
+ l.sw PT_GPR6(r1),r6 ;\
+ l.sw PT_GPR7(r1),r7 ;\
+ l.sw PT_GPR8(r1),r8 ;\
+ l.sw PT_GPR9(r1),r9 ;\
+ /* r10 already saved */ ;\
+ l.sw PT_GPR11(r1),r11 ;\
+ /* r12 already saved */ ;\
+ l.sw PT_GPR13(r1),r13 ;\
+ l.sw PT_GPR14(r1),r14 ;\
+ l.sw PT_GPR15(r1),r15 ;\
+ l.sw PT_GPR16(r1),r16 ;\
+ l.sw PT_GPR17(r1),r17 ;\
+ l.sw PT_GPR18(r1),r18 ;\
+ l.sw PT_GPR19(r1),r19 ;\
+ l.sw PT_GPR20(r1),r20 ;\
+ l.sw PT_GPR21(r1),r21 ;\
+ l.sw PT_GPR22(r1),r22 ;\
+ l.sw PT_GPR23(r1),r23 ;\
+ l.sw PT_GPR24(r1),r24 ;\
+ l.sw PT_GPR25(r1),r25 ;\
+ l.sw PT_GPR26(r1),r26 ;\
+ l.sw PT_GPR27(r1),r27 ;\
+ l.sw PT_GPR28(r1),r28 ;\
+ l.sw PT_GPR29(r1),r29 ;\
+ /* r30 already saved */ ;\
+/* l.sw PT_GPR30(r1),r30*/ ;\
+ l.sw PT_GPR31(r1),r31 ;\
+ l.sw PT_SYSCALLNO(r1),r0
+
+#define UNHANDLED_EXCEPTION(handler,vector) \
+ .global handler ;\
+handler: ;\
+ /* r1, EPCR, ESR already saved */ ;\
+ l.sw PT_GPR2(r1),r2 ;\
+ l.sw PT_GPR3(r1),r3 ;\
+ l.sw PT_ORIG_GPR11(r1),r11 ;\
+ l.sw PT_GPR5(r1),r5 ;\
+ l.sw PT_GPR6(r1),r6 ;\
+ l.sw PT_GPR7(r1),r7 ;\
+ l.sw PT_GPR8(r1),r8 ;\
+ l.sw PT_GPR9(r1),r9 ;\
+ /* r10 already saved */ ;\
+ l.sw PT_GPR11(r1),r11 ;\
+ /* r12 already saved */ ;\
+ l.sw PT_GPR13(r1),r13 ;\
+ l.sw PT_GPR14(r1),r14 ;\
+ l.sw PT_GPR15(r1),r15 ;\
+ l.sw PT_GPR16(r1),r16 ;\
+ l.sw PT_GPR17(r1),r17 ;\
+ l.sw PT_GPR18(r1),r18 ;\
+ l.sw PT_GPR19(r1),r19 ;\
+ l.sw PT_GPR20(r1),r20 ;\
+ l.sw PT_GPR21(r1),r21 ;\
+ l.sw PT_GPR22(r1),r22 ;\
+ l.sw PT_GPR23(r1),r23 ;\
+ l.sw PT_GPR24(r1),r24 ;\
+ l.sw PT_GPR25(r1),r25 ;\
+ l.sw PT_GPR26(r1),r26 ;\
+ l.sw PT_GPR27(r1),r27 ;\
+ l.sw PT_GPR28(r1),r28 ;\
+ l.sw PT_GPR29(r1),r29 ;\
+ /* r31 already saved */ ;\
+ l.sw PT_GPR30(r1),r30 ;\
+/* l.sw PT_GPR31(r1),r31 */ ;\
+ l.sw PT_SYSCALLNO(r1),r0 ;\
+ l.addi r3,r1,0 ;\
+ /* r4 is exception EA */ ;\
+ l.addi r5,r0,vector ;\
+ l.jal unhandled_exception ;\
+ l.nop ;\
+ l.j _ret_from_exception ;\
+ l.nop
+
+/*
+ * NOTE: one should never assume that SPR_EPC, SPR_ESR, SPR_EEAR
+ * contain the same values as when the exception we're handling
+ * occurred. In fact they never do. If you need them, use the
+ * values saved on the stack (for SPR_EPC, SPR_ESR) or the content
+ * of r4 (for SPR_EEAR). For details look at EXCEPTION_HANDLE()
+ * in 'arch/openrisc/kernel/head.S'
+ */
+
+/* =====================================================[ exceptions] === */
+
+/* ---[ 0x100: RESET exception ]----------------------------------------- */
+
+EXCEPTION_ENTRY(_tng_kernel_start)
+ l.jal _start
+ l.andi r0,r0,0
+
+/* ---[ 0x200: BUS exception ]------------------------------------------- */
+
+EXCEPTION_ENTRY(_bus_fault_handler)
+ /* r4: EA of fault (set by EXCEPTION_HANDLE) */
+ l.jal do_bus_fault
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_exception
+ l.nop
+
+/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+
+EXCEPTION_ENTRY(_data_page_fault_handler)
+ /* set up parameters for do_page_fault */
+ l.addi r3,r1,0 // pt_regs
+ /* r4 set by EXCEPTION_HANDLE */ // effective address of fault
+ l.ori r5,r0,0x300 // exception vector
+
+ /*
+ * __PHX__: TODO
+ *
+ * all this can be written much simpler. look at
+ * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
+ */
+#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
+ l.lwz r6,PT_PC(r3) // address of an offending insn
+ l.lwz r6,0(r6) // instruction that caused pf
+
+ l.srli r6,r6,26 // check opcode for jump insn
+ l.sfeqi r6,0 // l.j
+ l.bf 8f
+ l.sfeqi r6,1 // l.jal
+ l.bf 8f
+ l.sfeqi r6,3 // l.bnf
+ l.bf 8f
+ l.sfeqi r6,4 // l.bf
+ l.bf 8f
+ l.sfeqi r6,0x11 // l.jr
+ l.bf 8f
+ l.sfeqi r6,0x12 // l.jalr
+ l.bf 8f
+
+ l.nop
+
+ l.j 9f
+ l.nop
+8:
+
+ l.lwz r6,PT_PC(r3) // address of an offending insn
+ l.addi r6,r6,4
+ l.lwz r6,0(r6) // instruction that caused pf
+ l.srli r6,r6,26 // get opcode
+9:
+
+#else
+
+ l.mfspr r6,r0,SPR_SR // SR
+// l.lwz r6,PT_SR(r3) // ESR
+ l.andi r6,r6,SPR_SR_DSX // check for delay slot exception
+ l.sfeqi r6,0x1 // exception happened in delay slot
+ l.bnf 7f
+ l.lwz r6,PT_PC(r3) // address of an offending insn
+
+ l.addi r6,r6,4 // offending insn is in delay slot
+7:
+ l.lwz r6,0(r6) // instruction that caused pf
+ l.srli r6,r6,26 // check opcode for write access
+#endif
+
+ l.sfgeui r6,0x34 // check opcode for write access
+ l.bnf 1f
+ l.sfleui r6,0x37
+ l.bnf 1f
+ l.ori r6,r0,0x1 // write access
+ l.j 2f
+ l.nop
+1: l.ori r6,r0,0x0 // !write access
+2:
+
+ /* call fault.c handler in or32/mm/fault.c */
+ l.jal do_page_fault
+ l.nop
+ l.j _ret_from_exception
+ l.nop
+
+/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+
+EXCEPTION_ENTRY(_insn_page_fault_handler)
+ /* set up parameters for do_page_fault */
+ l.addi r3,r1,0 // pt_regs
+ /* r4 set by EXCEPTION_HANDLE */ // effective address of fault
+ l.ori r5,r0,0x400 // exception vector
+ l.ori r6,r0,0x0 // !write access
+
+ /* call fault.c handler in or32/mm/fault.c */
+ l.jal do_page_fault
+ l.nop
+ l.j _ret_from_exception
+ l.nop
+
+
+/* ---[ 0x500: Timer exception ]----------------------------------------- */
+
+EXCEPTION_ENTRY(_timer_handler)
+ l.jal timer_interrupt
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_intr
+ l.nop
+
+/* ---[ 0x600: Alignment exception ]------------------------------------- */
+
+EXCEPTION_ENTRY(_alignment_handler)
+ /* r4: EA of fault (set by EXCEPTION_HANDLE) */
+ l.jal do_unaligned_access
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_exception
+ l.nop
+
+#if 0
+EXCEPTION_ENTRY(_aligment_handler)
+// l.mfspr r2,r0,SPR_EEAR_BASE /* Load the effective address */
+ l.addi r2,r4,0
+// l.mfspr r5,r0,SPR_EPCR_BASE /* Load the insn address */
+ l.lwz r5,PT_PC(r1)
+
+ l.lwz r3,0(r5) /* Load insn */
+ l.srli r4,r3,26 /* Shift left to get the insn opcode */
+
+ l.sfeqi r4,0x00 /* Check if the load/store insn is in delay slot */
+ l.bf jmp
+ l.sfeqi r4,0x01
+ l.bf jmp
+ l.sfeqi r4,0x03
+ l.bf jmp
+ l.sfeqi r4,0x04
+ l.bf jmp
+ l.sfeqi r4,0x11
+ l.bf jr
+ l.sfeqi r4,0x12
+ l.bf jr
+ l.nop
+ l.j 1f
+ l.addi r5,r5,4 /* Increment PC to get return insn address */
+
+jmp:
+ l.slli r4,r3,6 /* Get the signed extended jump length */
+ l.srai r4,r4,4
+
+ l.lwz r3,4(r5) /* Load the real load/store insn */
+
+ l.add r5,r5,r4 /* Calculate jump target address */
+
+ l.j 1f
+ l.srli r4,r3,26 /* Shift left to get the insn opcode */
+
+jr:
+ l.slli r4,r3,9 /* Shift to get the reg nb */
+ l.andi r4,r4,0x7c
+
+ l.lwz r3,4(r5) /* Load the real load/store insn */
+
+ l.add r4,r4,r1 /* Load the jump register value from the stack */
+ l.lwz r5,0(r4)
+
+ l.srli r4,r3,26 /* Shift left to get the insn opcode */
+
+
+1:
+// l.mtspr r0,r5,SPR_EPCR_BASE
+ l.sw PT_PC(r1),r5
+
+ l.sfeqi r4,0x26
+ l.bf lhs
+ l.sfeqi r4,0x25
+ l.bf lhz
+ l.sfeqi r4,0x22
+ l.bf lws
+ l.sfeqi r4,0x21
+ l.bf lwz
+ l.sfeqi r4,0x37
+ l.bf sh
+ l.sfeqi r4,0x35
+ l.bf sw
+ l.nop
+
+1: l.j 1b /* I don't know what to do */
+ l.nop
+
+lhs: l.lbs r5,0(r2)
+ l.slli r5,r5,8
+ l.lbz r6,1(r2)
+ l.or r5,r5,r6
+ l.srli r4,r3,19
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.j align_end
+ l.sw 0(r4),r5
+
+lhz: l.lbz r5,0(r2)
+ l.slli r5,r5,8
+ l.lbz r6,1(r2)
+ l.or r5,r5,r6
+ l.srli r4,r3,19
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.j align_end
+ l.sw 0(r4),r5
+
+lws: l.lbs r5,0(r2)
+ l.slli r5,r5,24
+ l.lbz r6,1(r2)
+ l.slli r6,r6,16
+ l.or r5,r5,r6
+ l.lbz r6,2(r2)
+ l.slli r6,r6,8
+ l.or r5,r5,r6
+ l.lbz r6,3(r2)
+ l.or r5,r5,r6
+ l.srli r4,r3,19
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.j align_end
+ l.sw 0(r4),r5
+
+lwz: l.lbz r5,0(r2)
+ l.slli r5,r5,24
+ l.lbz r6,1(r2)
+ l.slli r6,r6,16
+ l.or r5,r5,r6
+ l.lbz r6,2(r2)
+ l.slli r6,r6,8
+ l.or r5,r5,r6
+ l.lbz r6,3(r2)
+ l.or r5,r5,r6
+ l.srli r4,r3,19
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.j align_end
+ l.sw 0(r4),r5
+
+sh:
+ l.srli r4,r3,9
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.lwz r5,0(r4)
+ l.sb 1(r2),r5
+ l.srli r5,r5,8
+ l.j align_end
+ l.sb 0(r2),r5
+
+sw:
+ l.srli r4,r3,9
+ l.andi r4,r4,0x7c
+ l.add r4,r4,r1
+ l.lwz r5,0(r4)
+ l.sb 3(r2),r5
+ l.srli r5,r5,8
+ l.sb 2(r2),r5
+ l.srli r5,r5,8
+ l.sb 1(r2),r5
+ l.srli r5,r5,8
+ l.j align_end
+ l.sb 0(r2),r5
+
+align_end:
+ l.j _ret_from_intr
+ l.nop
+#endif
+
+/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
+
+EXCEPTION_ENTRY(_illegal_instruction_handler)
+ /* r4: EA of fault (set by EXCEPTION_HANDLE) */
+ l.jal do_illegal_instruction
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_exception
+ l.nop
+
+/* ---[ 0x800: External interrupt exception ]---------------------------- */
+
+EXCEPTION_ENTRY(_external_irq_handler)
+#ifdef CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK
+ l.lwz r4,PT_SR(r1) // were interrupts enabled ?
+ l.andi r4,r4,SPR_SR_IEE
+ l.sfeqi r4,0
+ l.bnf 1f // ext irq enabled, all ok.
+ l.nop
+
+ l.addi r1,r1,-0x8
+ l.movhi r3,hi(42f)
+ l.ori r3,r3,lo(42f)
+ l.sw 0x0(r1),r3
+ l.jal printk
+ l.sw 0x4(r1),r4
+ l.addi r1,r1,0x8
+
+ .section .rodata, "a"
+42:
+ .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r"
+ .align 4
+ .previous
+
+ l.ori r4,r4,SPR_SR_IEE // fix the bug
+// l.sw PT_SR(r1),r4
+1:
+#endif
+ l.addi r3,r1,0
+ l.movhi r8,hi(do_IRQ)
+ l.ori r8,r8,lo(do_IRQ)
+ l.jalr r8
+ l.nop
+ l.j _ret_from_intr
+ l.nop
+
+/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
+
+
+/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
+
+
+/* ---[ 0xb00: Range exception ]----------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0xb00,0xb00)
+
+/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
+
+/*
+ * Syscalls are a special type of exception in that they are
+ * _explicitly_ invoked by userspace and can therefore be
+ * held to conform to the same ABI as normal functions with
+ * respect to whether registers are preserved across the call
+ * or not.
+ */
+
+/* Upon syscall entry we just save the callee-saved registers
+ * and not the call-clobbered ones.
+ */
+
+_string_syscall_return:
+ .string "syscall return %ld \n\r\0"
+ .align 4
+
+ENTRY(_sys_call_handler)
+ /* syscalls run with interrupts enabled */
+ ENABLE_INTERRUPTS(r29) // enable interrupts, r29 is temp
+
+ /* r1, EPCR, ESR are already saved */
+ l.sw PT_GPR2(r1),r2
+ /* r3-r8 must be saved because syscall restart relies
+ * on us being able to restore the syscall args... technically
+ * they should be clobbered, otherwise
+ */
+ l.sw PT_GPR3(r1),r3
+ /* r4 already saved */
+ /* r4 holds the EEAR address of the fault, load the original r4 */
+ l.lwz r4,PT_GPR4(r1)
+ l.sw PT_GPR5(r1),r5
+ l.sw PT_GPR6(r1),r6
+ l.sw PT_GPR7(r1),r7
+ l.sw PT_GPR8(r1),r8
+ l.sw PT_GPR9(r1),r9
+ /* r10 already saved */
+ l.sw PT_GPR11(r1),r11
+ l.sw PT_ORIG_GPR11(r1),r11
+ /* r12,r13 already saved */
+
+ /* r14-r28 (even) aren't touched by the syscall fast path below
+ * so we don't need to save them. However, the functions that return
+ * to userspace via a call to switch() DO need to save these because
+ * switch() effectively clobbers them... saving these registers for
+ * such functions is handled in their syscall wrappers (see fork, vfork,
+ * and clone, below).
+
+ /* r30 is the only register we clobber in the fast path */
+ /* r30 already saved */
+/* l.sw PT_GPR30(r1),r30 */
+ /* This is used by do_signal to determine whether to check for
+ * syscall restart or not */
+ l.sw PT_SYSCALLNO(r1),r11
+
+_syscall_check_trace_enter:
+ /* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */
+ l.lwz r30,TI_FLAGS(r10)
+ l.andi r30,r30,_TIF_SYSCALL_TRACE
+ l.sfne r30,r0
+ l.bf _syscall_trace_enter
+ l.nop
+
+_syscall_check:
+ /* Ensure that the syscall number is reasonable */
+ l.sfgeui r11,__NR_syscalls
+ l.bf _syscall_badsys
+ l.nop
+
+_syscall_call:
+ l.movhi r29,hi(sys_call_table)
+ l.ori r29,r29,lo(sys_call_table)
+ l.slli r11,r11,2
+ l.add r29,r29,r11
+ l.lwz r29,0(r29)
+
+ l.jalr r29
+ l.nop
+
+_syscall_return:
+ /* All syscalls return here... just pay attention to ret_from_fork
+ * which does it in a round-about way.
+ */
+ l.sw PT_GPR11(r1),r11 // save return value
+
+#if 0
+_syscall_debug:
+ l.movhi r3,hi(_string_syscall_return)
+ l.ori r3,r3,lo(_string_syscall_return)
+ l.ori r27,r0,1
+ l.sw -4(r1),r27
+ l.sw -8(r1),r11
+ l.addi r1,r1,-8
+ l.movhi r27,hi(printk)
+ l.ori r27,r27,lo(printk)
+ l.jalr r27
+ l.nop
+ l.addi r1,r1,8
+#endif
+
+_syscall_check_trace_leave:
+ /* r30 is a callee-saved register so this should still hold the
+ * _TIF_SYSCALL_TRACE flag from _syscall_check_trace_enter above...
+ * _syscall_trace_leave expects syscall result to be in pt_regs->r11.
+ */
+ l.sfne r30,r0
+ l.bf _syscall_trace_leave
+ l.nop
+
+/* This is where the exception-return code begins... interrupts need to be
+ * disabled the rest of the way here because we can't afford to miss any
+ * interrupts that set NEED_RESCHED or SIGNALPENDING... really true? */
+
+_syscall_check_work:
+ /* Here we need to disable interrupts */
+ DISABLE_INTERRUPTS(r27,r29)
+ l.lwz r30,TI_FLAGS(r10)
+ l.andi r30,r30,_TIF_WORK_MASK
+ l.sfne r30,r0
+
+ l.bnf _syscall_resume_userspace
+ l.nop
+
+ /* Work pending follows a different return path, so we need to
+ * make sure that all the call-saved registers get into pt_regs
+ * before branching...
+ */
+ l.sw PT_GPR14(r1),r14
+ l.sw PT_GPR16(r1),r16
+ l.sw PT_GPR18(r1),r18
+ l.sw PT_GPR20(r1),r20
+ l.sw PT_GPR22(r1),r22
+ l.sw PT_GPR24(r1),r24
+ l.sw PT_GPR26(r1),r26
+ l.sw PT_GPR28(r1),r28
+
+ /* _work_pending needs to be called with interrupts disabled */
+ l.j _work_pending
+ l.nop
+
+_syscall_resume_userspace:
+// ENABLE_INTERRUPTS(r29)
+
+
+/* This is the hot path for returning to userspace from a syscall. If there's
+ * work to be done and the branch to _work_pending was taken above, then the
+ * return to userspace will be done via the normal exception return path...
+ * that path restores _all_ registers and will overwrite the "clobbered"
+ * registers with whatever garbage is in pt_regs -- that's OK because those
+ * registers are clobbered anyway and because the extra work is insignificant
+ * in the context of the extra work that _work_pending is doing.
+ */
+
+/* Once again, syscalls are special and only guarantee to preserve the
+ * same registers as a normal function call */
+
+/* The assumption here is that the registers r14-r28 (even) are untouched and
+ * don't need to be restored... be sure that that's really the case!
+ */
+
+/* This is still too much... we should only be restoring what we actually
+ * clobbered... we should even be using 'scratch' (odd) regs above so that
+ * we don't need to restore anything, hardly...
+ */
+
+ l.lwz r2,PT_GPR2(r1)
+
+ /* Restore args */
+ /* r3-r8 are technically clobbered, but syscall restart needs these
+ * to be restored...
+ */
+ l.lwz r3,PT_GPR3(r1)
+ l.lwz r4,PT_GPR4(r1)
+ l.lwz r5,PT_GPR5(r1)
+ l.lwz r6,PT_GPR6(r1)
+ l.lwz r7,PT_GPR7(r1)
+ l.lwz r8,PT_GPR8(r1)
+
+ l.lwz r9,PT_GPR9(r1)
+ l.lwz r10,PT_GPR10(r1)
+ l.lwz r11,PT_GPR11(r1)
+
+ /* r30 is the only register we clobber in the fast path */
+ l.lwz r30,PT_GPR30(r1)
+
+ /* Here we use r13-r19 (odd) as scratch regs */
+ l.lwz r13,PT_PC(r1)
+ l.lwz r15,PT_SR(r1)
+ l.lwz r1,PT_SP(r1)
+ /* Interrupts need to be disabled for setting EPCR and ESR
+ * so that another interrupt doesn't come in here and clobber
+ * them before we can use them for our l.rfe */
+ DISABLE_INTERRUPTS(r17,r19)
+ l.mtspr r0,r13,SPR_EPCR_BASE
+ l.mtspr r0,r15,SPR_ESR_BASE
+ l.rfe
+
+/* End of hot path!
+ * Keep the below tracing and error handling out of the hot path...
+*/
+
+_syscall_trace_enter:
+ /* Here we pass pt_regs to do_syscall_trace_enter. Make sure
+ * that function is really getting all the info it needs as
+ * pt_regs isn't a complete set of userspace regs, just the
+ * ones relevant to the syscall...
+ *
+ * Note use of delay slot for setting argument.
+ */
+ l.jal do_syscall_trace_enter
+ l.addi r3,r1,0
+
+ /* Restore arguments (not preserved across do_syscall_trace_enter)
+ * so that we can do the syscall for real and return to the syscall
+ * hot path.
+ */
+ l.lwz r11,PT_SYSCALLNO(r1)
+ l.lwz r3,PT_GPR3(r1)
+ l.lwz r4,PT_GPR4(r1)
+ l.lwz r5,PT_GPR5(r1)
+ l.lwz r6,PT_GPR6(r1)
+ l.lwz r7,PT_GPR7(r1)
+
+ l.j _syscall_check
+ l.lwz r8,PT_GPR8(r1)
+
+_syscall_trace_leave:
+ l.jal do_syscall_trace_leave
+ l.addi r3,r1,0
+
+ l.j _syscall_check_work
+ l.nop
+
+_syscall_badsys:
+ /* Here we effectively pretend to have executed an imaginary
+ * syscall that returns -ENOSYS and then return to the regular
+ * syscall hot path.
+ * Note that "return value" is set in the delay slot...
+ */
+ l.j _syscall_return
+ l.addi r11,r0,-ENOSYS
+
+/******* END SYSCALL HANDLING *******/
+
+/* ---[ 0xd00: Trap exception ]------------------------------------------ */
+
+UNHANDLED_EXCEPTION(_vector_0xd00,0xd00)
+
+/* ---[ 0xe00: Trap exception ]------------------------------------------ */
+
+EXCEPTION_ENTRY(_trap_handler)
+ /* r4: EA of fault (set by EXCEPTION_HANDLE) */
+ l.jal do_trap
+ l.addi r3,r1,0 /* pt_regs */
+
+ l.j _ret_from_exception
+ l.nop
+
+/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0xf00,0xf00)
+
+/* ---[ 0x1000: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1000,0x1000)
+
+/* ---[ 0x1100: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1100,0x1100)
+
+/* ---[ 0x1200: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1200,0x1200)
+
+/* ---[ 0x1300: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1300,0x1300)
+
+/* ---[ 0x1400: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1400,0x1400)
+
+/* ---[ 0x1500: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1500,0x1500)
+
+/* ---[ 0x1600: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1600,0x1600)
+
+/* ---[ 0x1700: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1700,0x1700)
+
+/* ---[ 0x1800: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1800,0x1800)
+
+/* ---[ 0x1900: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1900,0x1900)
+
+/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00)
+
+/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00)
+
+/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00)
+
+/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00)
+
+/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00)
+
+/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
+
+UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
+
+/* ========================================================[ return ] === */
+
+_work_pending:
+ /*
+ * if (current_thread_info->flags & _TIF_NEED_RESCHED)
+ * schedule();
+ */
+ l.lwz r5,TI_FLAGS(r10)
+ l.andi r3,r5,_TIF_NEED_RESCHED
+ l.sfnei r3,0
+ l.bnf _work_notifysig
+ l.nop
+ l.jal schedule
+ l.nop
+ l.j _resume_userspace
+ l.nop
+
+/* Handle pending signals and notify-resume requests.
+ * do_notify_resume must be passed the latest pushed pt_regs, not
+ * necessarily the "userspace" ones. Also, pt_regs->syscallno
+ * must be set so that the syscall restart functionality works.
+ */
+_work_notifysig:
+ l.jal do_notify_resume
+ l.ori r3,r1,0 /* pt_regs */
+
+_resume_userspace:
+ DISABLE_INTERRUPTS(r3,r4)
+ l.lwz r3,TI_FLAGS(r10)
+ l.andi r3,r3,_TIF_WORK_MASK
+ l.sfnei r3,0
+ l.bf _work_pending
+ l.nop
+
+_restore_all:
+ RESTORE_ALL
+ /* This returns to userspace code */
+
+
+ENTRY(_ret_from_intr)
+ENTRY(_ret_from_exception)
+ l.lwz r4,PT_SR(r1)
+ l.andi r3,r4,SPR_SR_SM
+ l.sfeqi r3,0
+ l.bnf _restore_all
+ l.nop
+ l.j _resume_userspace
+ l.nop
+
+ENTRY(ret_from_fork)
+ l.jal schedule_tail
+ l.nop
+
+ /* _syscall_return expects r11 to contain the return value */
+ l.lwz r11,PT_GPR11(r1)
+
+ /* The syscall fast path return expects call-saved registers
+ * r12-r28 to be untouched, so we restore them here as they
+ * will have been effectively clobbered when arriving here
+ * via the call to switch()
+ */
+ l.lwz r12,PT_GPR12(r1)
+ l.lwz r14,PT_GPR14(r1)
+ l.lwz r16,PT_GPR16(r1)
+ l.lwz r18,PT_GPR18(r1)
+ l.lwz r20,PT_GPR20(r1)
+ l.lwz r22,PT_GPR22(r1)
+ l.lwz r24,PT_GPR24(r1)
+ l.lwz r26,PT_GPR26(r1)
+ l.lwz r28,PT_GPR28(r1)
+
+ l.j _syscall_return
+ l.nop
+
+/* Since syscalls don't save call-clobbered registers, the args to
+ * kernel_thread_helper will need to be passed through callee-saved
+ * registers and copied to the parameter registers when the thread
+ * begins running.
+ *
+ * See arch/openrisc/kernel/process.c:
+ * The args are passed as follows:
+ * arg1 (r3) : passed in r20
+ * arg2 (r4) : passed in r22
+ */
+
+ENTRY(_kernel_thread_helper)
+ l.or r3,r20,r0
+ l.or r4,r22,r0
+ l.movhi r31,hi(kernel_thread_helper)
+ l.ori r31,r31,lo(kernel_thread_helper)
+ l.jr r31
+ l.nop
+
+
+/* ========================================================[ switch ] === */
+
+/*
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+ * of the other is restored from its kernel stack. The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process, via the 'return'.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path. If you change this (or in particular, the
+ * SAVE_REGS macro), you'll have to change the fork code also.
+ */
+
+
+/* _switch MUST never lie on a page boundary, because it runs from
+ * effective addresses and being interrupted by an iTLB miss would kill it.
+ * A dTLB miss seems never to occur in the bad place since data accesses
+ * are from task structures which are always page aligned.
+ *
+ * The problem happens in RESTORE_ALL_NO_R11 where we first set the EPCR
+ * register, then load the previous register values and only at the end call
+ * the l.rfe instruction. If we get a TLB miss in between, the EPCR register
+ * gets garbled and we end up calling l.rfe with the wrong EPCR. (The same
+ * probably holds for ESR.)
+ *
+ * To avoid this problem it is sufficient to align _switch to
+ * some nice round number smaller than its size...
+ */
+
+/* ABI rules apply here... we either enter _switch via schedule() or via
+ * an imaginary call to which we shall return at return_from_fork. Either
+ * way, we are a function call and only need to preserve the callee-saved
+ * registers when we return. As such, we don't need to save the registers
+ * on the stack that we won't be returning as they were...
+ */
+
+ .align 0x400
+ENTRY(_switch)
+ /* We don't store SR as _switch only gets called in a context where
+ * the SR will be the same going in and coming out... */
+
+ /* Set up new pt_regs struct for saving task state */
+ l.addi r1,r1,-(INT_FRAME_SIZE)
+
+ /* No need to store r1/PT_SP as it goes into KSP below */
+ l.sw PT_GPR2(r1),r2
+ l.sw PT_GPR9(r1),r9
+ /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
+ * and expects r12 to be callee-saved... */
+ l.sw PT_GPR12(r1),r12
+ l.sw PT_GPR14(r1),r14
+ l.sw PT_GPR16(r1),r16
+ l.sw PT_GPR18(r1),r18
+ l.sw PT_GPR20(r1),r20
+ l.sw PT_GPR22(r1),r22
+ l.sw PT_GPR24(r1),r24
+ l.sw PT_GPR26(r1),r26
+ l.sw PT_GPR28(r1),r28
+ l.sw PT_GPR30(r1),r30
+
+ l.addi r11,r10,0 /* Save old 'current' to 'last' return value*/
+
+ /* We use thread_info->ksp for storing the address of the above
+ * structure so that we can get back to it later... we don't want
+ * to lose the value of thread_info->ksp, though, so store it as
+ * pt_regs->sp so that we can easily restore it when we are made
+ * live again...
+ */
+
+ /* Save the old value of thread_info->ksp as pt_regs->sp */
+ l.lwz r29,TI_KSP(r10)
+ l.sw PT_SP(r1),r29
+
+ /* Swap kernel stack pointers */
+ l.sw TI_KSP(r10),r1 /* Save old stack pointer */
+ l.or r10,r4,r0 /* Set up new current_thread_info */
+ l.lwz r1,TI_KSP(r10) /* Load new stack pointer */
+
+ /* Restore the old value of thread_info->ksp */
+ l.lwz r29,PT_SP(r1)
+ l.sw TI_KSP(r10),r29
+
+ /* ...and restore the registers, except r11 because the return value
+ * has already been set above.
+ */
+ l.lwz r2,PT_GPR2(r1)
+ l.lwz r9,PT_GPR9(r1)
+ /* No need to restore r10 */
+ /* ...and do not restore r11 */
+
+ /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
+ * and expects r12 to be callee-saved... */
+ l.lwz r12,PT_GPR12(r1)
+ l.lwz r14,PT_GPR14(r1)
+ l.lwz r16,PT_GPR16(r1)
+ l.lwz r18,PT_GPR18(r1)
+ l.lwz r20,PT_GPR20(r1)
+ l.lwz r22,PT_GPR22(r1)
+ l.lwz r24,PT_GPR24(r1)
+ l.lwz r26,PT_GPR26(r1)
+ l.lwz r28,PT_GPR28(r1)
+ l.lwz r30,PT_GPR30(r1)
+
+ /* Unwind stack to pre-switch state */
+ l.addi r1,r1,(INT_FRAME_SIZE)
+
+ /* Return via the link-register back to where we 'came from', where that can be
+ * either schedule() or return_from_fork()... */
+ l.jr r9
+ l.nop
+
+/* ==================================================================== */
+
+/* These all use the delay slot for setting the argument register, so the
+ * jump is always happening after the l.addi instruction.
+ *
+ * These are all just wrappers that don't touch the link-register r9, so the
+ * return from the "real" syscall function will return back to the syscall
+ * code that did the l.jal that brought us here.
+ */
+
+/* fork requires that we save all the callee-saved registers because they
+ * are all effectively clobbered by the call to _switch. Here we store
+ * all the registers that aren't touched by the syscall fast path and thus
+ * weren't saved there.
+ */
+
+_fork_save_extra_regs_and_call:
+ l.sw PT_GPR14(r1),r14
+ l.sw PT_GPR16(r1),r16
+ l.sw PT_GPR18(r1),r18
+ l.sw PT_GPR20(r1),r20
+ l.sw PT_GPR22(r1),r22
+ l.sw PT_GPR24(r1),r24
+ l.sw PT_GPR26(r1),r26
+ l.jr r29
+ l.sw PT_GPR28(r1),r28
+
+ENTRY(sys_clone)
+ l.movhi r29,hi(_sys_clone)
+ l.ori r29,r29,lo(_sys_clone)
+ l.j _fork_save_extra_regs_and_call
+ l.addi r7,r1,0
+
+ENTRY(sys_fork)
+ l.movhi r29,hi(_sys_fork)
+ l.ori r29,r29,lo(_sys_fork)
+ l.j _fork_save_extra_regs_and_call
+ l.addi r3,r1,0
+
+ENTRY(sys_execve)
+ l.j _sys_execve
+ l.addi r6,r1,0
+
+ENTRY(sys_sigaltstack)
+ l.j _sys_sigaltstack
+ l.addi r5,r1,0
+
+ENTRY(sys_rt_sigreturn)
+ l.j _sys_rt_sigreturn
+ l.addi r3,r1,0
+
+/* This is a catch-all syscall for atomic instructions for the OpenRISC 1000.
+ * The function takes a variable number of parameters depending on which
+ * particular flavour of atomic you want... parameter 1 is a flag identifying
+ * the atomic in question. Currently, this function implements the
+ * following variants:
+ *
+ * XCHG:
+ * @flag: 1
+ * @ptr1:
+ * @ptr2:
+ * Atomically exchange the values in pointers 1 and 2.
+ *
+ */
+
+ENTRY(sys_or1k_atomic)
+ /* FIXME: This ignores r3 and always does an XCHG */
+ DISABLE_INTERRUPTS(r17,r19)
+ l.lwz r30,0(r4)
+ l.lwz r28,0(r5)
+ l.sw 0(r4),r28
+ l.sw 0(r5),r30
+ ENABLE_INTERRUPTS(r17)
+ l.jr r9
+ l.or r11,r0,r0
+
+/* ============================================================[ EOF ]=== */
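
From userspace, the sys_or1k_atomic entry above would typically be reached through a small wrapper; a hedged sketch follows (the wrapper name is made up, and flag 1 selects the XCHG variant, which is the only one this patch implements):

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

/* Assumes installed kernel headers that export __NR_or1k_atomic
 * (the asm-generic __NR_arch_specific_syscall slot). */
static inline void or1k_atomic_xchg(unsigned long *a, unsigned long *b)
{
	/* The kernel disables interrupts around the two word swaps, so the
	 * exchange cannot be torn by an interrupt on this CPU. */
	syscall(__NR_or1k_atomic, 1UL, a, b);
}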
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
new file mode 100644
index 000000000000..c75018d22644
--- /dev/null
+++ b/arch/openrisc/kernel/head.S
@@ -0,0 +1,1607 @@
+/*
+ * OpenRISC head.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/cache.h>
+#include <asm/spr_defs.h>
+#include <asm/asm-offsets.h>
+
+#define tophys(rd,rs) \
+ l.movhi rd,hi(-KERNELBASE) ;\
+ l.add rd,rd,rs
+
+#define CLEAR_GPR(gpr) \
+ l.or gpr,r0,r0
+
+#define LOAD_SYMBOL_2_GPR(gpr,symbol) \
+ l.movhi gpr,hi(symbol) ;\
+ l.ori gpr,gpr,lo(symbol)
+
+
+#define UART_BASE_ADD 0x90000000
+
+#define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
+#define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
+
+/* ============================================[ tmp store locations ]=== */
+
+/*
+ * emergency_print temporary stores
+ */
+#define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4
+#define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5
+#define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6
+#define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7
+#define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8
+#define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0)
+
+#define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9
+#define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0)
+
+
+/*
+ * TLB miss handlers temporary stores
+ */
+#define EXCEPTION_STORE_GPR9 l.sw 0x10(r0),r9
+#define EXCEPTION_LOAD_GPR9 l.lwz r9,0x10(r0)
+
+#define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2
+#define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0)
+
+#define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3
+#define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0)
+
+#define EXCEPTION_STORE_GPR4 l.sw 0x6c(r0),r4
+#define EXCEPTION_LOAD_GPR4 l.lwz r4,0x6c(r0)
+
+#define EXCEPTION_STORE_GPR5 l.sw 0x70(r0),r5
+#define EXCEPTION_LOAD_GPR5 l.lwz r5,0x70(r0)
+
+#define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6
+#define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0)
+
+
+/*
+ * EXCEPTION_HANDLE temporary stores
+ */
+
+#define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30
+#define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0)
+
+#define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10
+#define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0)
+
+#define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1
+#define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0)
+
+/*
+ * For UNHANDLED_EXCEPTION
+ */
+
+#define EXCEPTION_T_STORE_GPR31 l.sw 0x84(r0),r31
+#define EXCEPTION_T_LOAD_GPR31(reg) l.lwz reg,0x84(r0)
+
+/* =========================================================[ macros ]=== */
+
+
+#define GET_CURRENT_PGD(reg,t1) \
+ LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
+ tophys (t1,reg) ;\
+ l.lwz reg,0(t1)
+
+
+/*
+ * DSCR: this is a common hook for handling exceptions. it will save
+ * the needed registers, set up stack and pointer to current
+ * then jump to the handler while enabling MMU
+ *
+ * PRMS: handler - a function to jump to. it has to save the
+ * remaining registers to kernel stack, call
+ * appropriate arch-independent exception handler
+ * and finally jump to ret_from_except
+ *
+ * PREQ: unchanged state from the time exception happened
+ *
+ * POST: SAVED the following registers' original values
+ * to the newly created exception frame pointed to by r1
+ *
+ * r1 - ksp pointing to the new (exception) frame
+ * r4 - EEAR exception EA
+ * r10 - current pointing to current_thread_info struct
+ * r12 - syscall 0, since we didn't come from syscall
+ * r13 - temp it actually contains new SR, not needed anymore
+ * r31 - handler address of the handler we'll jump to
+ *
+ * handler has to save remaining registers to the exception
+ * ksp frame *before* tainting them!
+ *
+ * NOTE: this function is not reentrant per se. reentrancy is guaranteed
+ * by the processor disabling all exceptions/interrupts when an exception
+ * occurs.
+ *
+ * OPTM: no need to make it so wasteful to extract ksp when in user mode
+ */
+
+#define EXCEPTION_HANDLE(handler) \
+ EXCEPTION_T_STORE_GPR30 ;\
+ l.mfspr r30,r0,SPR_ESR_BASE ;\
+ l.andi r30,r30,SPR_SR_SM ;\
+ l.sfeqi r30,0 ;\
+ EXCEPTION_T_STORE_GPR10 ;\
+ l.bnf 2f /* kernel_mode */ ;\
+ EXCEPTION_T_STORE_SP /* delay slot */ ;\
+1: /* user_mode: */ ;\
+ LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
+ tophys (r30,r1) ;\
+ /* r10: current_thread_info */ ;\
+ l.lwz r10,0(r30) ;\
+ tophys (r30,r10) ;\
+ l.lwz r1,(TI_KSP)(r30) ;\
+ /* fall through */ ;\
+2: /* kernel_mode: */ ;\
+ /* create new stack frame, save only needed gprs */ ;\
+ /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\
+ /* r12: temp, syscall indicator */ ;\
+ l.addi r1,r1,-(INT_FRAME_SIZE) ;\
+ /* r1 is KSP, r30 is __pa(KSP) */ ;\
+ tophys (r30,r1) ;\
+ l.sw PT_GPR12(r30),r12 ;\
+ l.mfspr r12,r0,SPR_EPCR_BASE ;\
+ l.sw PT_PC(r30),r12 ;\
+ l.mfspr r12,r0,SPR_ESR_BASE ;\
+ l.sw PT_SR(r30),r12 ;\
+ /* save r30 */ ;\
+ EXCEPTION_T_LOAD_GPR30(r12) ;\
+ l.sw PT_GPR30(r30),r12 ;\
+ /* save r10 as was prior to exception */ ;\
+ EXCEPTION_T_LOAD_GPR10(r12) ;\
+ l.sw PT_GPR10(r30),r12 ;\
+ /* save PT_SP as was prior to exception */ ;\
+ EXCEPTION_T_LOAD_SP(r12) ;\
+ l.sw PT_SP(r30),r12 ;\
+ /* save exception r4, set r4 = EA */ ;\
+ l.sw PT_GPR4(r30),r4 ;\
+ l.mfspr r4,r0,SPR_EEAR_BASE ;\
+ /* r12 == 1 if we come from syscall */ ;\
+ CLEAR_GPR(r12) ;\
+ /* ----- turn on MMU ----- */ ;\
+ l.ori r30,r0,(EXCEPTION_SR) ;\
+ l.mtspr r0,r30,SPR_ESR_BASE ;\
+ /* r30: EA address of handler */ ;\
+ LOAD_SYMBOL_2_GPR(r30,handler) ;\
+ l.mtspr r0,r30,SPR_EPCR_BASE ;\
+ l.rfe
+
+/*
+ * this doesn't work
+ *
+ *
+ * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
+ * #define UNHANDLED_EXCEPTION(handler) \
+ * l.ori r3,r0,0x1 ;\
+ * l.mtspr r0,r3,SPR_SR ;\
+ * l.movhi r3,hi(0xf0000100) ;\
+ * l.ori r3,r3,lo(0xf0000100) ;\
+ * l.jr r3 ;\
+ * l.nop 1
+ *
+ * #endif
+ */
+
+/* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
+ * a bit more careful (if we have a PT_SP or current pointer
+ * corruption) and set them up from 'current_set'
+ *
+ */
+#define UNHANDLED_EXCEPTION(handler) \
+ EXCEPTION_T_STORE_GPR31 ;\
+ EXCEPTION_T_STORE_GPR10 ;\
+ EXCEPTION_T_STORE_SP ;\
+ /* temporary store r3, r9 into r1, r10 */ ;\
+ l.addi r1,r3,0x0 ;\
+ l.addi r10,r9,0x0 ;\
+ /* the string referenced by r3 must be low enough */ ;\
+ l.jal _emergency_print ;\
+ l.ori r3,r0,lo(_string_unhandled_exception) ;\
+ l.mfspr r3,r0,SPR_NPC ;\
+ l.jal _emergency_print_nr ;\
+ l.andi r3,r3,0x1f00 ;\
+ /* the string referenced by r3 must be low enough */ ;\
+ l.jal _emergency_print ;\
+ l.ori r3,r0,lo(_string_epc_prefix) ;\
+ l.jal _emergency_print_nr ;\
+ l.mfspr r3,r0,SPR_EPCR_BASE ;\
+ l.jal _emergency_print ;\
+ l.ori r3,r0,lo(_string_nl) ;\
+ /* end of printing */ ;\
+ l.addi r3,r1,0x0 ;\
+ l.addi r9,r10,0x0 ;\
+ /* extract current, ksp from current_set */ ;\
+ LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top) ;\
+ LOAD_SYMBOL_2_GPR(r10,init_thread_union) ;\
+ /* create new stack frame, save only needed gprs */ ;\
+ /* r1: KSP, r10: current, r31: __pa(KSP) */ ;\
+ /* r12: temp, syscall indicator, r13 temp */ ;\
+ l.addi r1,r1,-(INT_FRAME_SIZE) ;\
+ /* r1 is KSP, r31 is __pa(KSP) */ ;\
+ tophys (r31,r1) ;\
+ l.sw PT_GPR12(r31),r12 ;\
+ l.mfspr r12,r0,SPR_EPCR_BASE ;\
+ l.sw PT_PC(r31),r12 ;\
+ l.mfspr r12,r0,SPR_ESR_BASE ;\
+ l.sw PT_SR(r31),r12 ;\
+ /* save r31 */ ;\
+ EXCEPTION_T_LOAD_GPR31(r12) ;\
+ l.sw PT_GPR31(r31),r12 ;\
+ /* save r10 as was prior to exception */ ;\
+ EXCEPTION_T_LOAD_GPR10(r12) ;\
+ l.sw PT_GPR10(r31),r12 ;\
+ /* save PT_SP as was prior to exception */ ;\
+ EXCEPTION_T_LOAD_SP(r12) ;\
+ l.sw PT_SP(r31),r12 ;\
+ l.sw PT_GPR13(r31),r13 ;\
+ /* --> */ ;\
+ /* save exception r4, set r4 = EA */ ;\
+ l.sw PT_GPR4(r31),r4 ;\
+ l.mfspr r4,r0,SPR_EEAR_BASE ;\
+ /* r12 == 1 if we come from syscall */ ;\
+ CLEAR_GPR(r12) ;\
+ /* ----- play a MMU trick ----- */ ;\
+ l.ori r31,r0,(EXCEPTION_SR) ;\
+ l.mtspr r0,r31,SPR_ESR_BASE ;\
+ /* r31: EA address of handler */ ;\
+ LOAD_SYMBOL_2_GPR(r31,handler) ;\
+ l.mtspr r0,r31,SPR_EPCR_BASE ;\
+ l.rfe
+
+/* =====================================================[ exceptions] === */
+
+/* ---[ 0x100: RESET exception ]----------------------------------------- */
+ .org 0x100
+ /* Jump to .init code at _start which lives in the .head section
+ * and will be discarded after boot.
+ */
+ LOAD_SYMBOL_2_GPR(r4, _start)
+ tophys (r3,r4) /* MMU disabled */
+ l.jr r3
+ l.nop
+
+/* ---[ 0x200: BUS exception ]------------------------------------------- */
+ .org 0x200
+_dispatch_bus_fault:
+ EXCEPTION_HANDLE(_bus_fault_handler)
+
+/* ---[ 0x300: Data Page Fault exception ]------------------------------- */
+ .org 0x300
+_dispatch_do_dpage_fault:
+// totally disable timer interrupt
+// l.mtspr r0,r0,SPR_TTMR
+// DEBUG_TLB_PROBE(0x300)
+// EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
+ EXCEPTION_HANDLE(_data_page_fault_handler)
+
+/* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
+ .org 0x400
+_dispatch_do_ipage_fault:
+// totally disable timer interrupt
+// l.mtspr r0,r0,SPR_TTMR
+// DEBUG_TLB_PROBE(0x400)
+// EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
+ EXCEPTION_HANDLE(_insn_page_fault_handler)
+
+/* ---[ 0x500: Timer exception ]----------------------------------------- */
+ .org 0x500
+ EXCEPTION_HANDLE(_timer_handler)
+
+/* ---[ 0x600: Alignment exception ]------------------------------------- */
+ .org 0x600
+ EXCEPTION_HANDLE(_alignment_handler)
+
+/* ---[ 0x700: Illegal insn exception ]---------------------------------- */
+ .org 0x700
+ EXCEPTION_HANDLE(_illegal_instruction_handler)
+
+/* ---[ 0x800: External interrupt exception ]---------------------------- */
+ .org 0x800
+ EXCEPTION_HANDLE(_external_irq_handler)
+
+/* ---[ 0x900: DTLB miss exception ]------------------------------------- */
+ .org 0x900
+ l.j boot_dtlb_miss_handler
+ l.nop
+
+/* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
+ .org 0xa00
+ l.j boot_itlb_miss_handler
+ l.nop
+
+/* ---[ 0xb00: Range exception ]----------------------------------------- */
+ .org 0xb00
+ UNHANDLED_EXCEPTION(_vector_0xb00)
+
+/* ---[ 0xc00: Syscall exception ]--------------------------------------- */
+ .org 0xc00
+ EXCEPTION_HANDLE(_sys_call_handler)
+
+/* ---[ 0xd00: Floating point exception ]-------------------------------- */
+ .org 0xd00
+ UNHANDLED_EXCEPTION(_vector_0xd00)
+
+/* ---[ 0xe00: Trap exception ]------------------------------------------ */
+ .org 0xe00
+// UNHANDLED_EXCEPTION(_vector_0xe00)
+ EXCEPTION_HANDLE(_trap_handler)
+
+/* ---[ 0xf00: Reserved exception ]-------------------------------------- */
+ .org 0xf00
+ UNHANDLED_EXCEPTION(_vector_0xf00)
+
+/* ---[ 0x1000: Reserved exception ]------------------------------------- */
+ .org 0x1000
+ UNHANDLED_EXCEPTION(_vector_0x1000)
+
+/* ---[ 0x1100: Reserved exception ]------------------------------------- */
+ .org 0x1100
+ UNHANDLED_EXCEPTION(_vector_0x1100)
+
+/* ---[ 0x1200: Reserved exception ]------------------------------------- */
+ .org 0x1200
+ UNHANDLED_EXCEPTION(_vector_0x1200)
+
+/* ---[ 0x1300: Reserved exception ]------------------------------------- */
+ .org 0x1300
+ UNHANDLED_EXCEPTION(_vector_0x1300)
+
+/* ---[ 0x1400: Reserved exception ]------------------------------------- */
+ .org 0x1400
+ UNHANDLED_EXCEPTION(_vector_0x1400)
+
+/* ---[ 0x1500: Reserved exception ]------------------------------------- */
+ .org 0x1500
+ UNHANDLED_EXCEPTION(_vector_0x1500)
+
+/* ---[ 0x1600: Reserved exception ]------------------------------------- */
+ .org 0x1600
+ UNHANDLED_EXCEPTION(_vector_0x1600)
+
+/* ---[ 0x1700: Reserved exception ]------------------------------------- */
+ .org 0x1700
+ UNHANDLED_EXCEPTION(_vector_0x1700)
+
+/* ---[ 0x1800: Reserved exception ]------------------------------------- */
+ .org 0x1800
+ UNHANDLED_EXCEPTION(_vector_0x1800)
+
+/* ---[ 0x1900: Reserved exception ]------------------------------------- */
+ .org 0x1900
+ UNHANDLED_EXCEPTION(_vector_0x1900)
+
+/* ---[ 0x1a00: Reserved exception ]------------------------------------- */
+ .org 0x1a00
+ UNHANDLED_EXCEPTION(_vector_0x1a00)
+
+/* ---[ 0x1b00: Reserved exception ]------------------------------------- */
+ .org 0x1b00
+ UNHANDLED_EXCEPTION(_vector_0x1b00)
+
+/* ---[ 0x1c00: Reserved exception ]------------------------------------- */
+ .org 0x1c00
+ UNHANDLED_EXCEPTION(_vector_0x1c00)
+
+/* ---[ 0x1d00: Reserved exception ]------------------------------------- */
+ .org 0x1d00
+ UNHANDLED_EXCEPTION(_vector_0x1d00)
+
+/* ---[ 0x1e00: Reserved exception ]------------------------------------- */
+ .org 0x1e00
+ UNHANDLED_EXCEPTION(_vector_0x1e00)
+
+/* ---[ 0x1f00: Reserved exception ]------------------------------------- */
+ .org 0x1f00
+ UNHANDLED_EXCEPTION(_vector_0x1f00)
+
+ .org 0x2000
+/* ===================================================[ kernel start ]=== */
+
+/* .text*/
+
+/* This early stuff belongs in HEAD, but some of the functions below definitely
+ * don't... */
+
+ __HEAD
+ .global _start
+_start:
+ /*
+ * ensure a deterministic start
+ */
+
+ l.ori r3,r0,0x1
+ l.mtspr r0,r3,SPR_SR
+
+ CLEAR_GPR(r1)
+ CLEAR_GPR(r2)
+ CLEAR_GPR(r3)
+ CLEAR_GPR(r4)
+ CLEAR_GPR(r5)
+ CLEAR_GPR(r6)
+ CLEAR_GPR(r7)
+ CLEAR_GPR(r8)
+ CLEAR_GPR(r9)
+ CLEAR_GPR(r10)
+ CLEAR_GPR(r11)
+ CLEAR_GPR(r12)
+ CLEAR_GPR(r13)
+ CLEAR_GPR(r14)
+ CLEAR_GPR(r15)
+ CLEAR_GPR(r16)
+ CLEAR_GPR(r17)
+ CLEAR_GPR(r18)
+ CLEAR_GPR(r19)
+ CLEAR_GPR(r20)
+ CLEAR_GPR(r21)
+ CLEAR_GPR(r22)
+ CLEAR_GPR(r23)
+ CLEAR_GPR(r24)
+ CLEAR_GPR(r25)
+ CLEAR_GPR(r26)
+ CLEAR_GPR(r27)
+ CLEAR_GPR(r28)
+ CLEAR_GPR(r29)
+ CLEAR_GPR(r30)
+ CLEAR_GPR(r31)
+
+ /*
+ * set up initial ksp and current
+ */
+ LOAD_SYMBOL_2_GPR(r1,init_thread_union+0x2000) // setup kernel stack
+ LOAD_SYMBOL_2_GPR(r10,init_thread_union) // setup current
+ tophys (r31,r10)
+ l.sw TI_KSP(r31), r1
+
+ l.ori r4,r0,0x0
+
+
+ /*
+ * .data contains initialized data,
+ * .bss contains uninitialized data - clear it up
+ */
+clear_bss:
+ LOAD_SYMBOL_2_GPR(r24, __bss_start)
+ LOAD_SYMBOL_2_GPR(r26, _end)
+ tophys(r28,r24)
+ tophys(r30,r26)
+ CLEAR_GPR(r24)
+ CLEAR_GPR(r26)
+1:
+ l.sw (0)(r28),r0
+ l.sfltu r28,r30
+ l.bf 1b
+ l.addi r28,r28,4
+
+enable_ic:
+ l.jal _ic_enable
+ l.nop
+
+enable_dc:
+ l.jal _dc_enable
+ l.nop
+
+flush_tlb:
+ /*
+ * I N V A L I D A T E T L B e n t r i e s
+ */
+ LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
+ LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
+ l.addi r7,r0,128 /* Maximum number of sets */
+1:
+ l.mtspr r5,r0,0x0
+ l.mtspr r6,r0,0x0
+
+ l.addi r5,r5,1
+ l.addi r6,r6,1
+ l.sfeq r7,r0
+ l.bnf 1b
+ l.addi r7,r7,-1
+
+
+/* The MMU needs to be enabled before or32_early_setup is called */
+
+enable_mmu:
+ /*
+ * enable dmmu & immu
+ * set SR[5] (DME) and SR[6] (IME), i.e. the 6th and 7th bits of SR
+ */
+ l.mfspr r30,r0,SPR_SR
+ l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
+ l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
+ l.or r30,r30,r28
+ l.mtspr r0,r30,SPR_SR
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+
+ // reset the simulation counters
+ l.nop 5
+
+ LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
+ l.jalr r24
+ l.nop
+
+clear_regs:
+ /*
+ * clear all GPRS to increase determinism
+ */
+ CLEAR_GPR(r2)
+ CLEAR_GPR(r3)
+ CLEAR_GPR(r4)
+ CLEAR_GPR(r5)
+ CLEAR_GPR(r6)
+ CLEAR_GPR(r7)
+ CLEAR_GPR(r8)
+ CLEAR_GPR(r9)
+ CLEAR_GPR(r11)
+ CLEAR_GPR(r12)
+ CLEAR_GPR(r13)
+ CLEAR_GPR(r14)
+ CLEAR_GPR(r15)
+ CLEAR_GPR(r16)
+ CLEAR_GPR(r17)
+ CLEAR_GPR(r18)
+ CLEAR_GPR(r19)
+ CLEAR_GPR(r20)
+ CLEAR_GPR(r21)
+ CLEAR_GPR(r22)
+ CLEAR_GPR(r23)
+ CLEAR_GPR(r24)
+ CLEAR_GPR(r25)
+ CLEAR_GPR(r26)
+ CLEAR_GPR(r27)
+ CLEAR_GPR(r28)
+ CLEAR_GPR(r29)
+ CLEAR_GPR(r30)
+ CLEAR_GPR(r31)
+
+jump_start_kernel:
+ /*
+ * jump to kernel entry (start_kernel)
+ */
+ LOAD_SYMBOL_2_GPR(r30, start_kernel)
+ l.jr r30
+ l.nop
+
+/* ========================================[ cache ]=== */
+
+ /* alignment here so we don't change memory offsets with
+ * the memory controller defined
+ */
+ .align 0x2000
+
+_ic_enable:
+ /* Check if IC present and skip enabling otherwise */
+ l.mfspr r24,r0,SPR_UPR
+ l.andi r26,r24,SPR_UPR_ICP
+ l.sfeq r26,r0
+ l.bf 9f
+ l.nop
+
+ /* Disable IC */
+ l.mfspr r6,r0,SPR_SR
+ l.addi r5,r0,-1
+ l.xori r5,r5,SPR_SR_ICE
+ l.and r5,r6,r5
+ l.mtspr r0,r5,SPR_SR
+
+ /* Establish cache block size
+ If BS=0, 16;
+ If BS=1, 32;
+ r14 contains the block size
+ */
+ l.mfspr r24,r0,SPR_ICCFGR
+ l.andi r26,r24,SPR_ICCFGR_CBS
+ l.srli r28,r26,7
+ l.ori r30,r0,16
+ l.sll r14,r30,r28
+
+ /* Establish number of cache sets
+ r16 contains number of cache sets
+ r28 contains log(# of cache sets)
+ */
+ l.andi r26,r24,SPR_ICCFGR_NCS
+ l.srli r28,r26,3
+ l.ori r30,r0,1
+ l.sll r16,r30,r28
+
+ /* Invalidate IC */
+ l.addi r6,r0,0
+ l.sll r5,r14,r28
+// l.mul r5,r14,r16
+// l.trap 1
+// l.addi r5,r0,IC_SIZE
+1:
+ l.mtspr r0,r6,SPR_ICBIR
+ l.sfne r6,r5
+ l.bf 1b
+ l.add r6,r6,r14
+ // l.addi r6,r6,IC_LINE
+
+ /* Enable IC */
+ l.mfspr r6,r0,SPR_SR
+ l.ori r6,r6,SPR_SR_ICE
+ l.mtspr r0,r6,SPR_SR
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+9:
+ l.jr r9
+ l.nop
+
+_dc_enable:
+ /* Check if DC present and skip enabling otherwise */
+ l.mfspr r24,r0,SPR_UPR
+ l.andi r26,r24,SPR_UPR_DCP
+ l.sfeq r26,r0
+ l.bf 9f
+ l.nop
+
+ /* Disable DC */
+ l.mfspr r6,r0,SPR_SR
+ l.addi r5,r0,-1
+ l.xori r5,r5,SPR_SR_DCE
+ l.and r5,r6,r5
+ l.mtspr r0,r5,SPR_SR
+
+ /* Establish cache block size
+ If BS=0, 16;
+ If BS=1, 32;
+ r14 contains the block size
+ */
+ l.mfspr r24,r0,SPR_DCCFGR
+ l.andi r26,r24,SPR_DCCFGR_CBS
+ l.srli r28,r26,7
+ l.ori r30,r0,16
+ l.sll r14,r30,r28
+
+ /* Establish number of cache sets
+ r16 contains number of cache sets
+ r28 contains log(# of cache sets)
+ */
+ l.andi r26,r24,SPR_DCCFGR_NCS
+ l.srli r28,r26,3
+ l.ori r30,r0,1
+ l.sll r16,r30,r28
+
+ /* Invalidate DC */
+ l.addi r6,r0,0
+ l.sll r5,r14,r28
+1:
+ l.mtspr r0,r6,SPR_DCBIR
+ l.sfne r6,r5
+ l.bf 1b
+ l.add r6,r6,r14
+
+ /* Enable DC */
+ l.mfspr r6,r0,SPR_SR
+ l.ori r6,r6,SPR_SR_DCE
+ l.mtspr r0,r6,SPR_SR
+9:
+ l.jr r9
+ l.nop
+
+/* ===============================================[ page table masks ]=== */
+
+/* bit 4 is used in hardware as write back cache bit. we never use this bit
+ * explicitly, so we can reuse it as _PAGE_FILE bit and mask it out when
+ * writing into hardware PTEs
+ */
+
+#define DTLB_UP_CONVERT_MASK 0x3fa
+#define ITLB_UP_CONVERT_MASK 0x3a
+
+/* for SMP we'd have (this is a bit subtle, CC must always be set
+ * for SMP, but since we have _PAGE_PRESENT bit always defined
+ * we can just modify the mask)
+ */
+#define DTLB_SMP_CONVERT_MASK 0x3fb
+#define ITLB_SMP_CONVERT_MASK 0x3b
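+/* Illustrative sketch (not part of the code): in the DTLB/ITLB miss handlers
+ * further below, the Linux pte value is ANDed with PAGE_MASK | *_CONVERT_MASK
+ * before being written to the hardware translate register, e.g. on UP:
+ *
+ *	dtlbtr = pte & (PAGE_MASK | DTLB_UP_CONVERT_MASK);
+ *	itlbtr = pte & (PAGE_MASK | ITLB_UP_CONVERT_MASK);
+ *
+ * so that Linux-only pte bits never reach the DTLB/ITLB translate registers.
+ */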
+
+/* ---[ boot dtlb miss handler ]----------------------------------------- */
+
+boot_dtlb_miss_handler:
+
+/* mask for DTLB_MR register: - (0) sets V (valid) bit,
+ * - (31-12) sets bits belonging to VPN (31-12)
+ */
+#define DTLB_MR_MASK 0xfffff001
+
+/* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
+ * - (4) sets A (access) bit,
+ * - (5) sets D (dirty) bit,
+ * - (8) sets SRE (superuser read) bit
+ * - (9) sets SWE (superuser write) bit
+ * - (31-12) sets bits belonging to VPN (31-12)
+ */
+#define DTLB_TR_MASK 0xfffff332
+
+/* These are for masking out the VPN/PPN value from the MR/TR registers...
+ * it's not the same as the PFN */
+#define VPN_MASK 0xfffff000
+#define PPN_MASK 0xfffff000
+
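+/* In rough C terms the boot-time handler below computes (sketch only, using
+ * the masks defined above):
+ *
+ *	set    = (eear >> 13) & (num_dmmu_sets - 1);
+ *	dtlbmr = (eear & VPN_MASK) | 0x001;		// DTLB_MR_MASK: VPN + V bit
+ *	pa     = (eear <= 0xbfffffff) ? eear : tophys(eear);
+ *	dtlbtr = (pa & PPN_MASK) | 0x332;		// DTLB_TR_MASK: CI,A,D,SRE,SWE
+ */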
+
+ EXCEPTION_STORE_GPR6
+
+#if 0
+ l.mfspr r6,r0,SPR_ESR_BASE //
+ l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
+ l.sfeqi r6,0 // r6 == 0x1 --> SM
+ l.bf exit_with_no_dtranslation //
+ l.nop
+#endif
+
+ /* this could be optimized by moving the storing of
+ * the non-r6 registers here, and jumping to the r6 restore
+ * if not in supervisor mode
+ */
+
+ EXCEPTION_STORE_GPR2
+ EXCEPTION_STORE_GPR3
+ EXCEPTION_STORE_GPR4
+ EXCEPTION_STORE_GPR5
+
+ l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
+
+immediate_translation:
+ CLEAR_GPR(r6)
+
+ l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))
+
+ l.mfspr r6, r0, SPR_DMMUCFGR
+ l.andi r6, r6, SPR_DMMUCFGR_NTS
+ l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
+ l.ori r5, r0, 0x1
+ l.sll r5, r5, r6 // r5 = number DMMU sets
+ l.addi r6, r5, -1 // r6 = nsets mask
+ l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
+
+ l.or r6,r6,r4 // r6 <- r4
+ l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
+ l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000
+ l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
+ l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry
+ l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR
+
+ /* set up DTLB with no translation for EA <= 0xbfffffff */
+ LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
+ l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA)
+ l.bf 1f // goto out
+ l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1)
+
+ tophys(r3,r4) // r3 <- PA
+1:
+ l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
+ l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000
+ l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_TR_MASK
+ l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry
+ l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR
+
+ EXCEPTION_LOAD_GPR6
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR2
+
+ l.rfe // SR <- ESR, PC <- EPC
+
+exit_with_no_dtranslation:
+ /* EA out of memory or not in supervisor mode */
+ EXCEPTION_LOAD_GPR6
+ EXCEPTION_LOAD_GPR4
+ l.j _dispatch_bus_fault
+
+/* ---[ boot itlb miss handler ]----------------------------------------- */
+
+boot_itlb_miss_handler:
+
+/* mask for ITLB_MR register: - sets V (valid) bit,
+ * - sets bits belonging to VPN (31-12)
+ */
+#define ITLB_MR_MASK 0xfffff001
+
+/* mask for ITLB_TR register: - sets A (access) bit,
+ * - sets SXE (superuser execute) bit
+ * - sets bits belonging to VPN (31-12)
+ */
+#define ITLB_TR_MASK 0xfffff050
+
+/*
+#define VPN_MASK 0xffffe000
+#define PPN_MASK 0xffffe000
+*/
+
+
+
+ EXCEPTION_STORE_GPR2
+ EXCEPTION_STORE_GPR3
+ EXCEPTION_STORE_GPR4
+ EXCEPTION_STORE_GPR5
+ EXCEPTION_STORE_GPR6
+
+#if 0
+ l.mfspr r6,r0,SPR_ESR_BASE //
+ l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
+ l.sfeqi r6,0 // r6 == 0x1 --> SM
+ l.bf exit_with_no_itranslation
+ l.nop
+#endif
+
+
+ l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
+
+earlyearly:
+ CLEAR_GPR(r6)
+
+ l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))
+
+ l.mfspr r6, r0, SPR_IMMUCFGR
+ l.andi r6, r6, SPR_IMMUCFGR_NTS
+ l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
+ l.ori r5, r0, 0x1
+ l.sll r5, r5, r6 // r5 = number IMMU sets from IMMUCFGR
+ l.addi r6, r5, -1 // r6 = nsets mask
+ l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
+
+ l.or r6,r6,r4 // r6 <- r4
+ l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
+ l.movhi r5,hi(ITLB_MR_MASK) // r5 <- ffff:0000.x000
+ l.ori r5,r5,lo(ITLB_MR_MASK) // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
+ l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have ITLBMR entry
+ l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // set ITLBMR
+
+ /*
+ * set up ITLB with no translation for EA <= 0x0fffffff
+ *
+ * we need this for head.S mapping (EA = PA). if we move all functions
+ * which run with mmu enabled into entry.S, we might be able to eliminate this.
+ *
+ */
+ LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
+ l.sfgeu r6,r4 // flag if r6 >= r4 (if 0x0fffffff >= EA)
+ l.bf 1f // goto out
+ l.and r3,r4,r4 // delay slot :: r3 <- r4 (if flag==1)
+
+ tophys(r3,r4) // r3 <- PA
+1:
+ l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
+ l.movhi r5,hi(ITLB_TR_MASK) // r5 <- ffff:0000.x000
+ l.ori r5,r5,lo(ITLB_TR_MASK) // r5 <- ffff:1111.x050 - apply ITLB_TR_MASK
+ l.and r5,r5,r3 // r5 <- PPN :PPN .x050 - we have ITLBTR entry
+ l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // set ITLBTR
+
+ EXCEPTION_LOAD_GPR6
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR2
+
+ l.rfe // SR <- ESR, PC <- EPC
+
+exit_with_no_itranslation:
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR6
+ l.j _dispatch_bus_fault
+ l.nop
+
+/* ====================================================================== */
+/*
+ * Stuff below here shouldn't go into .head section... maybe this stuff
+ * can be moved to entry.S ???
+ */
+
+/* ==============================================[ DTLB miss handler ]=== */
+
+/*
+ * Comments:
+ * Exception handlers are entered with MMU off so the following handler
+ * needs to use physical addressing
+ *
+ */
+
+ .text
+ENTRY(dtlb_miss_handler)
+ EXCEPTION_STORE_GPR2
+ EXCEPTION_STORE_GPR3
+ EXCEPTION_STORE_GPR4
+ EXCEPTION_STORE_GPR5
+ EXCEPTION_STORE_GPR6
+ /*
+ * get EA of the miss
+ */
+ l.mfspr r2,r0,SPR_EEAR_BASE
+ /*
+ * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
+ */
+ GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp
+ l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
+ l.slli r4,r4,0x2 // to get address << 2
+ l.add r5,r4,r3 // r4 is pgd_index(daddr)
+ /*
+ * if (pmd_none(*pmd))
+ * goto pmd_none:
+ */
+ tophys (r4,r5)
+ l.lwz r3,0x0(r4) // get *pmd value
+ l.sfne r3,r0
+ l.bnf d_pmd_none
+ l.andi r3,r3,~PAGE_MASK // 0x1fff == ~PAGE_MASK
+ /*
+ * if (pmd_bad(*pmd))
+ * pmd_clear(pmd)
+ * goto pmd_bad:
+ */
+// l.sfeq r3,r0 // check *pmd value
+// l.bf d_pmd_good
+ l.addi r3,r0,0xffffe000 // PAGE_MASK
+// l.j d_pmd_bad
+// l.sw 0x0(r4),r0 // clear pmd
+d_pmd_good:
+ /*
+ * pte = *pte_offset(pmd, daddr);
+ */
+ l.lwz r4,0x0(r4) // get **pmd value
+ l.and r4,r4,r3 // & PAGE_MASK
+ l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
+ l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
+ l.slli r3,r3,0x2 // to get address << 2
+ l.add r3,r3,r4
+ l.lwz r2,0x0(r3) // this is pte at last
+ /*
+ * if (!pte_present(pte))
+ */
+ l.andi r4,r2,0x1
+ l.sfne r4,r0 // is pte present
+ l.bnf d_pte_not_present
+ l.addi r3,r0,0xffffe3fa // PAGE_MASK | DTLB_UP_CONVERT_MASK
+ /*
+ * fill DTLB TR register
+ */
+ l.and r4,r2,r3 // apply the mask
+ // Determine number of DMMU sets
+ l.mfspr r6, r0, SPR_DMMUCFGR
+ l.andi r6, r6, SPR_DMMUCFGR_NTS
+ l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
+ l.ori r3, r0, 0x1
+ l.sll r3, r3, r6 // r3 = number DMMU sets DMMUCFGR
+ l.addi r6, r3, -1 // r6 = nsets mask
+ l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1)
+ //NUM_TLB_ENTRIES
+ l.mtspr r5,r4,SPR_DTLBTR_BASE(0)
+ /*
+ * fill DTLB MR register
+ */
+ l.mfspr r2,r0,SPR_EEAR_BASE
+ l.addi r3,r0,0xffffe000 // PAGE_MASK
+ l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?)
+ l.ori r4,r4,0x1 // set hardware valid bit: DTLB_MR entry
+ l.mtspr r5,r4,SPR_DTLBMR_BASE(0)
+
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.rfe
+d_pmd_bad:
+ l.nop 1
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.rfe
+d_pmd_none:
+d_pte_not_present:
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.j _dispatch_do_dpage_fault
+ l.nop
+
+/* ==============================================[ ITLB miss handler ]=== */
+ENTRY(itlb_miss_handler)
+ EXCEPTION_STORE_GPR2
+ EXCEPTION_STORE_GPR3
+ EXCEPTION_STORE_GPR4
+ EXCEPTION_STORE_GPR5
+ EXCEPTION_STORE_GPR6
+ /*
+ * get EA of the miss
+ */
+ l.mfspr r2,r0,SPR_EEAR_BASE
+
+ /*
+ * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
+ *
+ */
+ GET_CURRENT_PGD(r3,r5) // r3 is current_pgd, r5 is temp
+ l.srli r4,r2,0x18 // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
+ l.slli r4,r4,0x2 // to get address << 2
+ l.add r5,r4,r3 // r4 is pgd_index(daddr)
+ /*
+ * if (pmd_none(*pmd))
+ * goto pmd_none:
+ */
+ tophys (r4,r5)
+ l.lwz r3,0x0(r4) // get *pmd value
+ l.sfne r3,r0
+ l.bnf i_pmd_none
+ l.andi r3,r3,0x1fff // ~PAGE_MASK
+ /*
+ * if (pmd_bad(*pmd))
+ * pmd_clear(pmd)
+ * goto pmd_bad:
+ */
+
+// l.sfeq r3,r0 // check *pmd value
+// l.bf i_pmd_good
+ l.addi r3,r0,0xffffe000 // PAGE_MASK
+// l.j i_pmd_bad
+// l.sw 0x0(r4),r0 // clear pmd
+
+i_pmd_good:
+ /*
+ * pte = *pte_offset(pmd, iaddr);
+ *
+ */
+ l.lwz r4,0x0(r4) // get **pmd value
+ l.and r4,r4,r3 // & PAGE_MASK
+ l.srli r5,r2,0xd // >> PAGE_SHIFT, r2 == EEAR
+ l.andi r3,r5,0x7ff // (1UL << PAGE_SHIFT - 2) - 1
+ l.slli r3,r3,0x2 // to get address << 2
+ l.add r3,r3,r4
+ l.lwz r2,0x0(r3) // this is pte at last
+ /*
+ * if (!pte_present(pte))
+ *
+ */
+ l.andi r4,r2,0x1
+ l.sfne r4,r0 // is pte present
+ l.bnf i_pte_not_present
+ l.addi r3,r0,0xffffe03a // PAGE_MASK | ITLB_UP_CONVERT_MASK
+ /*
+ * fill ITLB TR register
+ */
+ l.and r4,r2,r3 // apply the mask
+ l.andi r3,r2,0x7c0 // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
+// l.andi r3,r2,0x400 // _PAGE_EXEC
+ l.sfeq r3,r0
+ l.bf itlb_tr_fill //_workaround
+ // Determine number of IMMU sets
+ l.mfspr r6, r0, SPR_IMMUCFGR
+ l.andi r6, r6, SPR_IMMUCFGR_NTS
+ l.srli r6, r6, SPR_IMMUCFGR_NTS_OFF
+ l.ori r3, r0, 0x1
+ l.sll r3, r3, r6 // r3 = number IMMU sets IMMUCFGR
+ l.addi r6, r3, -1 // r6 = nsets mask
+ l.and r5, r5, r6 // calc offset: & (NUM_TLB_ENTRIES-1)
+
+/*
+ * __PHX__ :: fixme
+ * we should not just blindly set executable flags,
+ * but it does help with ping. the clean way would be to find out
+ * (and fix) why the stack doesn't have execution permissions
+ */
+
+itlb_tr_fill_workaround:
+ l.ori r4,r4,0xc0 // | (SPR_ITLBTR_UXE | ITLBTR_SXE)
+itlb_tr_fill:
+ l.mtspr r5,r4,SPR_ITLBTR_BASE(0)
+ /*
+ * fill ITLB MR register
+ */
+ l.mfspr r2,r0,SPR_EEAR_BASE
+ l.addi r3,r0,0xffffe000 // PAGE_MASK
+ l.and r4,r2,r3 // apply PAGE_MASK to EA (__PHX__ do we really need this?)
+ l.ori r4,r4,0x1 // set hardware valid bit: ITLB_MR entry
+ l.mtspr r5,r4,SPR_ITLBMR_BASE(0)
+
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.rfe
+
+i_pmd_bad:
+ l.nop 1
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.rfe
+i_pmd_none:
+i_pte_not_present:
+ EXCEPTION_LOAD_GPR2
+ EXCEPTION_LOAD_GPR3
+ EXCEPTION_LOAD_GPR4
+ EXCEPTION_LOAD_GPR5
+ EXCEPTION_LOAD_GPR6
+ l.j _dispatch_do_ipage_fault
+ l.nop
+
+/* ==============================================[ boot tlb handlers ]=== */
+
+
+/* =================================================[ debugging aids ]=== */
+
+ .align 64
+_immu_trampoline:
+ .space 64
+_immu_trampoline_top:
+
+#define TRAMP_SLOT_0 (0x0)
+#define TRAMP_SLOT_1 (0x4)
+#define TRAMP_SLOT_2 (0x8)
+#define TRAMP_SLOT_3 (0xc)
+#define TRAMP_SLOT_4 (0x10)
+#define TRAMP_SLOT_5 (0x14)
+#define TRAMP_FRAME_SIZE (0x18)
+
+ENTRY(_immu_trampoline_workaround)
+ // r2 EEA
+ // r6 is physical EEA
+ tophys(r6,r2)
+
+ LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
+ tophys (r3,r5) // r3 is trampoline (physical)
+
+ LOAD_SYMBOL_2_GPR(r4,0x15000000)
+ l.sw TRAMP_SLOT_0(r3),r4
+ l.sw TRAMP_SLOT_1(r3),r4
+ l.sw TRAMP_SLOT_4(r3),r4
+ l.sw TRAMP_SLOT_5(r3),r4
+
+ // EPC = EEA - 0x4
+ l.lwz r4,0x0(r6) // load op @ EEA + 0x0 (fc address)
+ l.sw TRAMP_SLOT_3(r3),r4 // store it to _immu_trampoline_data
+ l.lwz r4,-0x4(r6) // load op @ EEA - 0x4 (f8 address)
+ l.sw TRAMP_SLOT_2(r3),r4 // store it to _immu_trampoline_data
+
+ l.srli r5,r4,26 // check opcode for write access
+ l.sfeqi r5,0 // l.j
+ l.bf 0f
+ l.sfeqi r5,0x11 // l.jr
+ l.bf 1f
+ l.sfeqi r5,1 // l.jal
+ l.bf 2f
+ l.sfeqi r5,0x12 // l.jalr
+ l.bf 3f
+ l.sfeqi r5,3 // l.bnf
+ l.bf 4f
+ l.sfeqi r5,4 // l.bf
+ l.bf 5f
+99:
+ l.nop
+ l.j 99b // should never happen
+ l.nop 1
+
+ // r2 is EEA
+ // r3 is trampoline address (physical)
+ // r4 is instruction
+ // r6 is physical(EEA)
+ //
+ // r5
+
+2: // l.jal
+
+ /* 19 20 aa aa l.movhi r9,0xaaaa
+ * a9 29 bb bb l.ori r9,0xbbbb
+ *
+ * where 0xaaaabbbb is EEA + 0x4 shifted right 2
+ */
+
+ l.addi r6,r2,0x4 // this is 0xaaaabbbb
+
+ // l.movhi r9,0xaaaa
+ l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
+ l.sh (TRAMP_SLOT_0+0x0)(r3),r5
+ l.srli r5,r6,16
+ l.sh (TRAMP_SLOT_0+0x2)(r3),r5
+
+ // l.ori r9,0xbbbb
+ l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
+ l.sh (TRAMP_SLOT_1+0x0)(r3),r5
+ l.andi r5,r6,0xffff
+ l.sh (TRAMP_SLOT_1+0x2)(r3),r5
+
+ /* fallthrough, need to set up new jump offset */
+
+
+0: // l.j
+ l.slli r6,r4,6 // original offset shifted left 6 - 2
+// l.srli r6,r6,6 // original offset shifted right 2
+
+ l.slli r4,r2,4 // old jump position: EEA shifted left 4
+// l.srli r4,r4,6 // old jump position: shifted right 2
+
+ l.addi r5,r3,0xc // new jump position (physical)
+ l.slli r5,r5,4 // new jump position: shifted left 4
+
+ // calculate new jump offset
+ // new_off = old_off + (old_jump - new_jump)
+
+ l.sub r5,r4,r5 // old_jump - new_jump
+ l.add r5,r6,r5 // orig_off + (old_jump - new_jump)
+ l.srli r5,r5,6 // new offset shifted right 2
+
+ // r5 is new jump offset
+ // l.j has opcode 0x0...
+ l.sw TRAMP_SLOT_2(r3),r5 // write it back
+
+ l.j trampoline_out
+ l.nop
+
+/* ----------------------------- */
+
+3: // l.jalr
+
+ /* 19 20 aa aa l.movhi r9,0xaaaa
+ * a9 29 bb bb l.ori r9,0xbbbb
+ *
+ * where 0xaaaabbbb is EEA + 0x4 shifted right 2
+ */
+
+ l.addi r6,r2,0x4 // this is 0xaaaabbbb
+
+ // l.movhi r9,0xaaaa
+ l.ori r5,r0,0x1920 // 0x1920 == l.movhi r9
+ l.sh (TRAMP_SLOT_0+0x0)(r3),r5
+ l.srli r5,r6,16
+ l.sh (TRAMP_SLOT_0+0x2)(r3),r5
+
+ // l.ori r9,0xbbbb
+ l.ori r5,r0,0xa929 // 0xa929 == l.ori r9
+ l.sh (TRAMP_SLOT_1+0x0)(r3),r5
+ l.andi r5,r6,0xffff
+ l.sh (TRAMP_SLOT_1+0x2)(r3),r5
+
+ l.lhz r5,(TRAMP_SLOT_2+0x0)(r3) // load hi part of jump instruction
+ l.andi r5,r5,0x3ff // clear out opcode part
+ l.ori r5,r5,0x4400 // opcode changed from l.jalr -> l.jr
+ l.sh (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
+
+ /* fallthrough */
+
+1: // l.jr
+ l.j trampoline_out
+ l.nop
+
+/* ----------------------------- */
+
+4: // l.bnf
+5: // l.bf
+ l.slli r6,r4,6 // original offset shifted left 6 - 2
+// l.srli r6,r6,6 // original offset shifted right 2
+
+ l.slli r4,r2,4 // old jump position: EEA shifted left 4
+// l.srli r4,r4,6 // old jump position: shifted right 2
+
+ l.addi r5,r3,0xc // new jump position (physical)
+ l.slli r5,r5,4 // new jump position: shifted left 4
+
+ // calculate new jump offset
+ // new_off = old_off + (old_jump - new_jump)
+
+ l.add r6,r6,r4 // (orig_off + old_jump)
+ l.sub r6,r6,r5 // (orig_off + old_jump) - new_jump
+ l.srli r6,r6,6 // new offset shifted right 2
+
+ // r6 is new jump offset
+ l.lwz r4,(TRAMP_SLOT_2+0x0)(r3) // load jump instruction
+ l.srli r4,r4,16
+ l.andi r4,r4,0xfc00 // get opcode part
+ l.slli r4,r4,16
+ l.or r6,r4,r6 // l.b(n)f new offset
+ l.sw TRAMP_SLOT_2(r3),r6 // write it back
+
+ /* we need to add l.j to EEA + 0x8 */
+ tophys (r4,r2) // may not be needed (due to the shifts further down)
+ l.addi r4,r4,(0x8 - 0x8) // jump target = r2 + 0x8 (compensate for 0x8)
+ // jump position = r5 + 0x8 (0x8 compensated)
+ l.sub r4,r4,r5 // jump offset = target - new_position + 0x8
+
+ l.slli r4,r4,4 // the amount of info in immediate of jump
+ l.srli r4,r4,6 // jump instruction with offset
+ l.sw TRAMP_SLOT_4(r3),r4 // write it to 4th slot
+
+ /* fallthrough */
+
+trampoline_out:
+ // set up new EPC to point to our trampoline code
+ LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
+ l.mtspr r0,r5,SPR_EPCR_BASE
+
+ // immu_trampoline is (4x) CACHE_LINE aligned
+ // and only 6 instructions long,
+ // so we need to invalidate only 2 lines
+
+ /* Establish cache block size
+ If BS=0, 16;
+ If BS=1, 32;
+ r14 contains the block size
+ */
+ l.mfspr r21,r0,SPR_ICCFGR
+ l.andi r21,r21,SPR_ICCFGR_CBS
+ l.srli r21,r21,7
+ l.ori r23,r0,16
+ l.sll r14,r23,r21
+
+ l.mtspr r0,r5,SPR_ICBIR
+ l.add r5,r5,r14
+ l.mtspr r0,r5,SPR_ICBIR
+
+ l.jr r9
+ l.nop
+
+
+/*
+ * DSCR: prints a string referenced by r3.
+ *
+ * PRMS: r3 - address of the first character of null
+ * terminated string to be printed
+ *
+ * PREQ: UART at UART_BASE_ADD has to be initialized
+ *
+ * POST: caller should be aware that r3, r9 are changed
+ */
+ENTRY(_emergency_print)
+ EMERGENCY_PRINT_STORE_GPR4
+ EMERGENCY_PRINT_STORE_GPR5
+ EMERGENCY_PRINT_STORE_GPR6
+ EMERGENCY_PRINT_STORE_GPR7
+2:
+ l.lbz r7,0(r3)
+ l.sfeq r7,r0
+ l.bf 9f
+ l.nop
+
+// putc:
+ l.movhi r4,hi(UART_BASE_ADD)
+
+ l.addi r6,r0,0x20
+1: l.lbz r5,5(r4)
+ l.andi r5,r5,0x20
+ l.sfeq r5,r6
+ l.bnf 1b
+ l.nop
+
+ l.sb 0(r4),r7
+
+ l.addi r6,r0,0x60
+1: l.lbz r5,5(r4)
+ l.andi r5,r5,0x60
+ l.sfeq r5,r6
+ l.bnf 1b
+ l.nop
+
+ /* next character */
+ l.j 2b
+ l.addi r3,r3,0x1
+
+9:
+ EMERGENCY_PRINT_LOAD_GPR7
+ EMERGENCY_PRINT_LOAD_GPR6
+ EMERGENCY_PRINT_LOAD_GPR5
+ EMERGENCY_PRINT_LOAD_GPR4
+ l.jr r9
+ l.nop
+
+ENTRY(_emergency_print_nr)
+ EMERGENCY_PRINT_STORE_GPR4
+ EMERGENCY_PRINT_STORE_GPR5
+ EMERGENCY_PRINT_STORE_GPR6
+ EMERGENCY_PRINT_STORE_GPR7
+ EMERGENCY_PRINT_STORE_GPR8
+
+ l.addi r8,r0,32 // shift register
+
+1: /* remove leading zeros */
+ l.addi r8,r8,-0x4
+ l.srl r7,r3,r8
+ l.andi r7,r7,0xf
+
+ /* don't skip the last zero if number == 0x0 */
+ l.sfeqi r8,0x4
+ l.bf 2f
+ l.nop
+
+ l.sfeq r7,r0
+ l.bf 1b
+ l.nop
+
+2:
+ l.srl r7,r3,r8
+
+ l.andi r7,r7,0xf
+ l.sflts r8,r0
+ l.bf 9f
+
+ l.sfgtui r7,0x9
+ l.bnf 8f
+ l.nop
+ l.addi r7,r7,0x27
+
+8:
+ l.addi r7,r7,0x30
+// putc:
+ l.movhi r4,hi(UART_BASE_ADD)
+
+ l.addi r6,r0,0x20
+1: l.lbz r5,5(r4)
+ l.andi r5,r5,0x20
+ l.sfeq r5,r6
+ l.bnf 1b
+ l.nop
+
+ l.sb 0(r4),r7
+
+ l.addi r6,r0,0x60
+1: l.lbz r5,5(r4)
+ l.andi r5,r5,0x60
+ l.sfeq r5,r6
+ l.bnf 1b
+ l.nop
+
+ /* next character */
+ l.j 2b
+ l.addi r8,r8,-0x4
+
+9:
+ EMERGENCY_PRINT_LOAD_GPR8
+ EMERGENCY_PRINT_LOAD_GPR7
+ EMERGENCY_PRINT_LOAD_GPR6
+ EMERGENCY_PRINT_LOAD_GPR5
+ EMERGENCY_PRINT_LOAD_GPR4
+ l.jr r9
+ l.nop
+
+
+/*
+ * This should be used for debugging only.
+ * It messes up the Linux early serial output
+ * somehow, so use it sparingly and essentially
+ * only if you need to debug something that goes wrong
+ * before Linux gets the early serial going.
+ *
+ * Furthermore, you'll have to make sure you set the
+ * UART_DIVISOR correctly according to the system
+ * clock rate.
+ *
+ *
+ */
+
+
+
+#define SYS_CLK 20000000
+//#define SYS_CLK 1843200
+#define OR32_CONSOLE_BAUD 115200
+#define UART_DIVISOR SYS_CLK/(16*OR32_CONSOLE_BAUD)
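+/* Worked example with the values above: 20000000 / (16 * 115200) = 10
+ * (integer division); the low byte goes to UART_DLL and the high byte to
+ * UART_DLM in _early_uart_init below.
+ */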
+
+ENTRY(_early_uart_init)
+ l.movhi r3,hi(UART_BASE_ADD)
+
+ l.addi r4,r0,0x7
+ l.sb 0x2(r3),r4
+
+ l.addi r4,r0,0x0
+ l.sb 0x1(r3),r4
+
+ l.addi r4,r0,0x3
+ l.sb 0x3(r3),r4
+
+ l.lbz r5,3(r3)
+ l.ori r4,r5,0x80
+ l.sb 0x3(r3),r4
+ l.addi r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
+ l.sb UART_DLM(r3),r4
+ l.addi r4,r0,((UART_DIVISOR) & 0x000000ff)
+ l.sb UART_DLL(r3),r4
+ l.sb 0x3(r3),r5
+
+ l.jr r9
+ l.nop
+
+_string_copying_linux:
+ .string "\n\n\n\n\n\rCopying Linux... \0"
+
+_string_ok_booting:
+ .string "Ok, booting the kernel.\n\r\0"
+
+_string_unhandled_exception:
+ .string "\n\rRunarunaround: Unhandled exception 0x\0"
+
+_string_epc_prefix:
+ .string ": EPC=0x\0"
+
+_string_nl:
+ .string "\n\r\0"
+
+ .global _string_esr_irq_bug
+_string_esr_irq_bug:
+ .string "\n\rESR external interrupt bug, for details look into entry.S\n\r\0"
+
+
+
+/* ========================================[ page aligned structures ]=== */
+
+/*
+ * .data section should be page aligned
+ * (look into arch/or32/kernel/vmlinux.lds)
+ */
+ .section .data,"aw"
+ .align 8192
+ .global empty_zero_page
+empty_zero_page:
+ .space 8192
+
+ .global swapper_pg_dir
+swapper_pg_dir:
+ .space 8192
+
+ .global _unhandled_stack
+_unhandled_stack:
+ .space 8192
+_unhandled_stack_top:
+
+/* ============================================================[ EOF ]=== */
diff --git a/arch/openrisc/kernel/idle.c b/arch/openrisc/kernel/idle.c
new file mode 100644
index 000000000000..d5bc5f813e89
--- /dev/null
+++ b/arch/openrisc/kernel/idle.c
@@ -0,0 +1,77 @@
+/*
+ * OpenRISC idle.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Idle daemon for or32. The idle daemon handles any action
+ * that needs to be taken when the system becomes idle.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/cache.h>
+#include <asm/pgalloc.h>
+
+void (*powersave) (void) = NULL;
+
+static inline void pm_idle(void)
+{
+ barrier();
+}
+
+void cpu_idle(void)
+{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_stop_sched_tick(1);
+
+ while (!need_resched()) {
+ check_pgt_cache();
+ rmb();
+
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+
+ local_irq_disable();
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
+ if (!need_resched() && powersave != NULL)
+ powersave();
+ start_critical_timings();
+ local_irq_enable();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ }
+
+ tick_nohz_restart_sched_tick();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
diff --git a/arch/openrisc/kernel/init_task.c b/arch/openrisc/kernel/init_task.c
new file mode 100644
index 000000000000..45744a384927
--- /dev/null
+++ b/arch/openrisc/kernel/init_task.c
@@ -0,0 +1,41 @@
+/*
+ * OpenRISC init_task.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is THREAD_SIZE aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union thread_union init_thread_union __init_task_data = {
+ INIT_THREAD_INFO(init_task)
+};
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c
new file mode 100644
index 000000000000..59b302338331
--- /dev/null
+++ b/arch/openrisc/kernel/irq.c
@@ -0,0 +1,172 @@
+/*
+ * OpenRISC irq.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/ftrace.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/kernel_stat.h>
+
+#include <linux/irqflags.h>
+
+/* read interrupt enabled status */
+unsigned long arch_local_save_flags(void)
+{
+ return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE);
+}
+EXPORT_SYMBOL(arch_local_save_flags);
+
+/* set interrupt enabled status */
+void arch_local_irq_restore(unsigned long flags)
+{
+ mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags));
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+/* OR1K PIC implementation */
+
+/* We're a couple of cycles faster than the generic implementations with
+ * these 'fast' versions.
+ */
+
+static void or1k_pic_mask(struct irq_data *data)
+{
+ mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->irq));
+}
+
+static void or1k_pic_unmask(struct irq_data *data)
+{
+ mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->irq));
+}
+
+static void or1k_pic_ack(struct irq_data *data)
+{
+ /* EDGE-triggered interrupts need to be ack'ed in order to clear
+ * the latch.
+ * LEVEL-triggered interrupts do not need to be ack'ed; however,
+ * ack'ing the interrupt has no ill-effect and is quicker than
+ * trying to figure out what type it is...
+ */
+
+ /* The OpenRISC 1000 spec says to write a 1 to the bit to ack the
+ * interrupt, but the OR1200 does this backwards and requires a 0
+ * to be written...
+ */
+
+#ifdef CONFIG_OR1K_1200
+ /* There are two oddities with the OR1200 PIC implementation:
+ * i) LEVEL-triggered interrupts are latched and need to be cleared
+ * ii) the interrupt latch is cleared by writing a 0 to the bit,
+ * as opposed to a 1 as mandated by the spec
+ */
+
+ mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq));
+#else
+ WARN(1, "Interrupt handling possibly broken\n");
+ mtspr(SPR_PICSR, (1UL << data->irq));
+#endif
+}
+
+static void or1k_pic_mask_ack(struct irq_data *data)
+{
+ /* Comments for pic_ack apply here, too */
+
+#ifdef CONFIG_OR1K_1200
+ mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->irq));
+#else
+ WARN(1, "Interrupt handling possibly broken\n");
+ mtspr(SPR_PICSR, (1UL << data->irq));
+#endif
+}
+
+static int or1k_pic_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ /* There's nothing to do in the PIC configuration when changing
+ * flow type. Level and edge-triggered interrupts are both
+ * supported, but it's PIC-implementation specific which type
+ * is handled. */
+
+ return irq_setup_alt_chip(data, flow_type);
+}
+
+static inline int pic_get_irq(int first)
+{
+ int irq;
+
+ irq = ffs(mfspr(SPR_PICSR) >> first);
+
+ return irq ? irq + first - 1 : NO_IRQ;
+}
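+/* Worked example (illustrative): with SPR_PICSR == 0x14 and first == 0,
+ * ffs(0x14) == 3, so pic_get_irq() returns 3 + 0 - 1 == 2, i.e. IRQ 2 --
+ * the lowest pending line at or above 'first'. do_IRQ() below then rescans
+ * from irq + 1 until NO_IRQ is returned.
+ */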
+
+static void __init or1k_irq_init(void)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ /* Disable all interrupts until explicitly requested */
+ mtspr(SPR_PICMR, (0UL));
+
+ gc = irq_alloc_generic_chip("or1k-PIC", 1, 0, 0, handle_level_irq);
+ ct = gc->chip_types;
+
+ ct->chip.irq_unmask = or1k_pic_unmask;
+ ct->chip.irq_mask = or1k_pic_mask;
+ ct->chip.irq_ack = or1k_pic_ack;
+ ct->chip.irq_mask_ack = or1k_pic_mask_ack;
+ ct->chip.irq_set_type = or1k_pic_set_type;
+
+ /* The OR1K PIC can handle both level and edge-triggered
+ * interrupts in roughly the same manner
+ */
+#if 0
+ /* FIXME: chip.type??? */
+ ct->chip.type = IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_MASK;
+#endif
+
+ irq_setup_generic_chip(gc, IRQ_MSK(NR_IRQS), 0,
+ IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
+}
+
+void __init init_IRQ(void)
+{
+ or1k_irq_init();
+}
+
+void __irq_entry do_IRQ(struct pt_regs *regs)
+{
+ int irq = -1;
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
+ generic_handle_irq(irq);
+
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+unsigned int irq_create_of_mapping(struct device_node *controller,
+ const u32 *intspec, unsigned int intsize)
+{
+ return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c
new file mode 100644
index 000000000000..10ff50f0202a
--- /dev/null
+++ b/arch/openrisc/kernel/module.c
@@ -0,0 +1,72 @@
+/*
+ * OpenRISC module.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+ uint32_t value;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+ value = sym->st_value + rel[i].r_addend;
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_OR32_32:
+ *location = value;
+ break;
+ case R_OR32_CONST:
+ location = (uint16_t *)location + 1;
+ *((uint16_t *)location) = (uint16_t) (value);
+ break;
+ case R_OR32_CONSTH:
+ location = (uint16_t *)location + 1;
+ *((uint16_t *)location) = (uint16_t) (value >> 16);
+ break;
+ case R_OR32_JUMPTARG:
+ value -= (uint32_t)location;
+ value >>= 2;
+ value &= 0x03ffffff;
+ value |= *location & 0xfc000000;
+ *location = value;
+ break;
+ default:
+ pr_err("module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ break;
+ }
+ }
+
+ return 0;
+}
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
new file mode 100644
index 000000000000..83ccf7c0c58d
--- /dev/null
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -0,0 +1,46 @@
+/*
+ * OpenRISC or32_ksyms.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/semaphore.h>
+
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/delay.h>
+#include <asm/pgalloc.h>
+
+#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
+
+/* compiler generated symbols */
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__divsi3);
+DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(__modsi3);
+DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__ashrdi3);
+DECLARE_EXPORT(__ashldi3);
+DECLARE_EXPORT(__lshrdi3);
+
+EXPORT_SYMBOL(__copy_tofrom_user);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
new file mode 100644
index 000000000000..e4209af879ec
--- /dev/null
+++ b/arch/openrisc/kernel/process.c
@@ -0,0 +1,311 @@
+/*
+ * OpenRISC process.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file handles the architecture-dependent parts of process handling...
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/elfcore.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/spr_defs.h>
+
+#include <linux/smp.h>
+
+/*
+ * Pointer to Current thread info structure.
+ *
+ * Used at user space -> kernel transitions.
+ */
+struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, };
+
+void machine_restart(void)
+{
+ printk(KERN_INFO "*** MACHINE RESTART ***\n");
+ __asm__("l.nop 1");
+}
+
+/*
+ * Similar to machine_power_off, but don't shut off power. Add code
+ * here to freeze the system for e.g. post-mortem debug purpose when
+ * possible. This halt has nothing to do with the idle halt.
+ */
+void machine_halt(void)
+{
+ printk(KERN_INFO "*** MACHINE HALT ***\n");
+ __asm__("l.nop 1");
+}
+
+/* If or when software power-off is implemented, add code here. */
+void machine_power_off(void)
+{
+ printk(KERN_INFO "*** MACHINE POWER OFF ***\n");
+ __asm__("l.nop 1");
+}
+
+void (*pm_power_off) (void) = machine_power_off;
+
+/*
+ * When a process does an "exec", machine state like FPU and debug
+ * registers need to be reset. This is a hook function for that.
+ * Currently we don't have any such state to reset, so this is empty.
+ */
+void flush_thread(void)
+{
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ extern void show_registers(struct pt_regs *regs);
+
+ /* __PHX__ cleanup this mess */
+ show_registers(regs);
+}
+
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+ return (unsigned long)user_regs(t->stack)->pc;
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * Copy the thread-specific (arch specific) info from the current
+ * process to the new one p
+ */
+extern asmlinkage void ret_from_fork(void);
+
+int
+copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long unused, struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+ struct pt_regs *kregs;
+ unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ struct thread_info *ti;
+ unsigned long top_of_kernel_stack;
+
+ top_of_kernel_stack = sp;
+
+ p->set_child_tid = p->clear_child_tid = NULL;
+
+ /* Copy registers */
+ /* redzone */
+ sp -= STACK_FRAME_OVERHEAD;
+ sp -= sizeof(struct pt_regs);
+ childregs = (struct pt_regs *)sp;
+
+ /* Copy parent registers */
+ *childregs = *regs;
+
+ if ((childregs->sr & SPR_SR_SM) == 1) {
+ /* for kernel thread, set `current_thread_info'
+ * and stackptr in new task
+ */
+ childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ childregs->gpr[10] = (unsigned long)task_thread_info(p);
+ } else {
+ childregs->sp = usp;
+ }
+
+ childregs->gpr[11] = 0; /* Result from fork() */
+
+ /*
+ * The way this works is that at some point in the future
+ * some task will call _switch to switch to the new task.
+ * That will pop off the stack frame created below and start
+ * the new task running at ret_from_fork. The new task will
+ * do some housekeeping and then return from the fork or clone
+ * system call, using the stack frame created above.
+ */
+ /* redzone */
+ sp -= STACK_FRAME_OVERHEAD;
+ sp -= sizeof(struct pt_regs);
+ kregs = (struct pt_regs *)sp;
+
+ ti = task_thread_info(p);
+ ti->ksp = sp;
+
+ /* kregs->sp must store the location of the 'pre-switch' kernel stack
+ * pointer... for a newly forked process, this is simply the top of
+ * the kernel stack.
+ */
+ kregs->sp = top_of_kernel_stack;
+ kregs->gpr[3] = (unsigned long)current; /* arg to schedule_tail */
+ kregs->gpr[10] = (unsigned long)task_thread_info(p);
+ kregs->gpr[9] = (unsigned long)ret_from_fork;
+
+ return 0;
+}
+
+/*
+ * Set up a thread for executing a new program
+ */
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+ unsigned long sr = regs->sr & ~SPR_SR_SM;
+
+ set_fs(USER_DS);
+ memset(regs->gpr, 0, sizeof(regs->gpr));
+
+ regs->pc = pc;
+ regs->sr = sr;
+ regs->sp = sp;
+
+/* printk("start thread, ksp = %lx\n", current_thread_info()->ksp);*/
+}
+
+/* Fill in the fpu structure for a core dump. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
+{
+ /* TODO */
+ return 0;
+}
+
+extern struct thread_info *_switch(struct thread_info *old_ti,
+ struct thread_info *new_ti);
+
+struct task_struct *__switch_to(struct task_struct *old,
+ struct task_struct *new)
+{
+ struct task_struct *last;
+ struct thread_info *new_ti, *old_ti;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* current_thread_info_set is an array of saved current pointers
+ * (one for each cpu). we need them at user->kernel transition,
+ * while we save them at kernel->user transition
+ */
+ new_ti = new->stack;
+ old_ti = old->stack;
+
+ current_thread_info_set[smp_processor_id()] = new_ti;
+ last = (_switch(old_ti, new_ti))->task;
+
+ local_irq_restore(flags);
+
+ return last;
+}
+
+/*
+ * Write out registers in core dump format, as defined by the
+ * struct user_regs_struct
+ */
+void dump_elf_thread(elf_greg_t *dest, struct pt_regs* regs)
+{
+ dest[0] = 0; /* r0 */
+ memcpy(dest+1, regs->gpr+1, 31*sizeof(unsigned long));
+ dest[32] = regs->pc;
+ dest[33] = regs->sr;
+ dest[34] = 0;
+ dest[35] = 0;
+}
+
+extern void _kernel_thread_helper(void);
+
+void __noreturn kernel_thread_helper(int (*fn) (void *), void *arg)
+{
+ do_exit(fn(arg));
+}
+
+/*
+ * Create a kernel thread.
+ */
+int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.gpr[20] = (unsigned long)fn;
+ regs.gpr[22] = (unsigned long)arg;
+ regs.sr = mfspr(SPR_SR);
+ regs.pc = (unsigned long)_kernel_thread_helper;
+
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+ 0, &regs, 0, NULL, NULL);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage long _sys_execve(const char __user *name,
+ const char __user * const __user *argv,
+ const char __user * const __user *envp,
+ struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname(name);
+ error = PTR_ERR(filename);
+
+ if (IS_ERR(filename))
+ goto out;
+
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+
+out:
+ return error;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ /* TODO */
+
+ return 0;
+}
+
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+{
+ register long __res asm("r11") = __NR_execve;
+ register long __a asm("r3") = (long)(filename);
+ register long __b asm("r4") = (long)(argv);
+ register long __c asm("r5") = (long)(envp);
+ __asm__ volatile ("l.sys 1"
+ : "=r" (__res), "=r"(__a), "=r"(__b), "=r"(__c)
+ : "0"(__res), "1"(__a), "2"(__b), "3"(__c)
+ : "r6", "r7", "r8", "r12", "r13", "r15",
+ "r17", "r19", "r21", "r23", "r25", "r27",
+ "r29", "r31");
+ __asm__ volatile ("l.nop");
+ return __res;
+}
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
new file mode 100644
index 000000000000..1bb58ba89afa
--- /dev/null
+++ b/arch/openrisc/kernel/prom.c
@@ -0,0 +1,108 @@
+/*
+ * OpenRISC prom.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Architecture specific procedures for creating, accessing and
+ * interpreting the device tree.
+ *
+ */
+
+#include <stdarg.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/initrd.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/debugfs.h>
+#include <linux/irq.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+
+#include <asm/prom.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <asm/system.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+extern char cmd_line[COMMAND_LINE_SIZE];
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ size &= PAGE_MASK;
+ memblock_add(base, size);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+
+void __init early_init_devtree(void *params)
+{
+ void *alloc;
+
+ /* Setup flat device-tree pointer */
+ initial_boot_params = params;
+
+
+ /* Retrieve various information from the /chosen node of the
+ * device-tree, including the platform type, initrd location and
+ * size, TCE reserve, and more ...
+ */
+ of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
+
+ /* Scan memory nodes and rebuild MEMBLOCKs */
+ memblock_init();
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ /* Save command line for /proc/cmdline and then parse parameters */
+ strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
+
+ memblock_analyze();
+
+ /* We must copy the flattened device tree from init memory to regular
+ * memory because the device tree references the strings in it
+ * directly.
+ */
+
+ alloc = __va(memblock_alloc(initial_boot_params->totalsize, PAGE_SIZE));
+
+ memcpy(alloc, initial_boot_params, initial_boot_params->totalsize);
+
+ initial_boot_params = alloc;
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
new file mode 100644
index 000000000000..656b94beab89
--- /dev/null
+++ b/arch/openrisc/kernel/ptrace.c
@@ -0,0 +1,211 @@
+/*
+ * OpenRISC ptrace.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <stddef.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/audit.h>
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+
+#include <asm/thread_info.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+/*
+ * Copy the thread state to a regset that can be interpreted by userspace.
+ *
+ * It doesn't matter what our internal pt_regs structure looks like. The
+ * important thing is that we export a consistent view of the thread state
+ * to userspace. As such, we need to make sure that the regset remains
+ * ABI compatible as defined by the struct user_regs_struct:
+ *
+ * (Each item is a 32-bit word)
+ * r0 = 0 (exported for clarity)
+ * 31 GPRS r1-r31
+ * PC (Program counter)
+ * SR (Supervision register)
+ */
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user * ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* r0 */
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 0, 4);
+
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->gpr+1, 4, 4*32);
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->pc, 4*32, 4*33);
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->sr, 4*33, 4*34);
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 4*34, -1);
+
+ return ret;
+}
+
+/*
+ * Set the thread state from a regset passed in via ptrace
+ */
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user * ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* ignore r0 */
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
+ /* r1 - r31 */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->gpr+1, 4, 4*32);
+ /* PC */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc, 4*32, 4*33);
+ /*
+	 * Skip SR and padding... userspace isn't allowed to change bits in
+ * the Supervision register
+ */
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 4*33, -1);
+
+ return ret;
+}
+
+/*
+ * Define the register sets available on OpenRISC under Linux
+ */
+enum or1k_regset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset or1k_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+};
+
+static const struct user_regset_view user_or1k_native_view = {
+ .name = "or1k",
+ .e_machine = EM_OPENRISC,
+ .regsets = or1k_regsets,
+ .n = ARRAY_SIZE(or1k_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_or1k_native_view;
+}
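+
+/*
+ * For illustration (userspace side, not kernel code): assuming a
+ * user_regs_struct laid out as described above (32 GPRs, then PC, then SR),
+ * a debugger would read this regset with something like
+ *
+ *	struct user_regs_struct uregs;
+ *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
+ *
+ *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
+ *
+ * after which uregs.gpr[0] reads back as 0 and PC/SR follow the GPRs.
+ */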
+
+/*
+ * Signals sent when the child dies are not yet caught here;
+ * that is handled in exit.c and signal.c.
+ */
+
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure the single step bit is not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ pr_debug("ptrace_disable(): TODO\n");
+
+ user_disable_single_step(child);
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+}
+
+long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
+ unsigned long data)
+{
+ int ret;
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ long ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in <something>.
+ */
+ ret = -1L;
+
+ /* Are these regs right??? */
+ if (unlikely(current->audit_context))
+ audit_syscall_entry(audit_arch(), regs->syscallno,
+ regs->gpr[3], regs->gpr[4],
+ regs->gpr[5], regs->gpr[6]);
+
+ return ret ? : regs->syscallno;
+}
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ if (unlikely(current->audit_context))
+ audit_syscall_exit(AUDITSC_RESULT(regs->gpr[11]),
+ regs->gpr[11]);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
new file mode 100644
index 000000000000..1422f747f52b
--- /dev/null
+++ b/arch/openrisc/kernel/setup.c
@@ -0,0 +1,381 @@
+/*
+ * OpenRISC setup.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/serial.h>
+#include <linux/initrd.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/types.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/cpuinfo.h>
+#include <asm/delay.h>
+
+#include "vmlinux.h"
+
+char __initdata cmd_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+
+static unsigned long __init setup_memory(void)
+{
+ unsigned long bootmap_size;
+ unsigned long ram_start_pfn;
+ unsigned long free_ram_start_pfn;
+ unsigned long ram_end_pfn;
+ phys_addr_t memory_start, memory_end;
+ struct memblock_region *region;
+
+ memory_end = memory_start = 0;
+
+	/* Find the main memory region where the kernel is */
+ for_each_memblock(memory, region) {
+ memory_start = region->base;
+ memory_end = region->base + region->size;
+ printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+ memory_start, memory_end);
+ }
+
+ if (!memory_end) {
+ panic("No memory!");
+ }
+
+ ram_start_pfn = PFN_UP(memory_start);
+ /* free_ram_start_pfn is first page after kernel */
+ free_ram_start_pfn = PFN_UP(__pa(&_end));
+ ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+
+ max_pfn = ram_end_pfn;
+
+ /*
+ * initialize the boot-time allocator (with low memory only).
+ *
+ * This makes the memory from the end of the kernel to the end of
+ * RAM usable.
+ * init_bootmem sets the global values min_low_pfn, max_low_pfn.
+ */
+ bootmap_size = init_bootmem(free_ram_start_pfn,
+ ram_end_pfn - ram_start_pfn);
+ free_bootmem(PFN_PHYS(free_ram_start_pfn),
+ (ram_end_pfn - free_ram_start_pfn) << PAGE_SHIFT);
+ reserve_bootmem(PFN_PHYS(free_ram_start_pfn), bootmap_size,
+ BOOTMEM_DEFAULT);
+
+ for_each_memblock(reserved, region) {
+ printk(KERN_INFO "Reserved - 0x%08x-0x%08x\n",
+ (u32) region->base, (u32) region->size);
+ reserve_bootmem(region->base, region->size, BOOTMEM_DEFAULT);
+ }
+
+ return ram_end_pfn;
+}
+
+struct cpuinfo cpuinfo;
+
+static void print_cpuinfo(void)
+{
+ unsigned long upr = mfspr(SPR_UPR);
+ unsigned long vr = mfspr(SPR_VR);
+ unsigned int version;
+ unsigned int revision;
+
+ version = (vr & SPR_VR_VER) >> 24;
+ revision = (vr & SPR_VR_REV);
+
+ printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n",
+ version, revision, cpuinfo.clock_frequency / 1000000);
+
+ if (!(upr & SPR_UPR_UP)) {
+ printk(KERN_INFO
+ "-- no UPR register... unable to detect configuration\n");
+ return;
+ }
+
+ if (upr & SPR_UPR_DCP)
+ printk(KERN_INFO
+ "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
+ cpuinfo.dcache_size, cpuinfo.dcache_block_size, 1);
+ else
+ printk(KERN_INFO "-- dcache disabled\n");
+ if (upr & SPR_UPR_ICP)
+ printk(KERN_INFO
+ "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
+ cpuinfo.icache_size, cpuinfo.icache_block_size, 1);
+ else
+ printk(KERN_INFO "-- icache disabled\n");
+
+ if (upr & SPR_UPR_DMP)
+ printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n",
+ 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
+ if (upr & SPR_UPR_IMP)
+ printk(KERN_INFO "-- immu: %4d entries, %lu way(s)\n",
+ 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW));
+
+ printk(KERN_INFO "-- additional features:\n");
+ if (upr & SPR_UPR_DUP)
+ printk(KERN_INFO "-- debug unit\n");
+ if (upr & SPR_UPR_PCUP)
+ printk(KERN_INFO "-- performance counters\n");
+ if (upr & SPR_UPR_PMP)
+ printk(KERN_INFO "-- power management\n");
+ if (upr & SPR_UPR_PICP)
+ printk(KERN_INFO "-- PIC\n");
+ if (upr & SPR_UPR_TTP)
+ printk(KERN_INFO "-- timer\n");
+ if (upr & SPR_UPR_CUP)
+ printk(KERN_INFO "-- custom unit(s)\n");
+}
+
+void __init setup_cpuinfo(void)
+{
+ struct device_node *cpu;
+ unsigned long iccfgr, dccfgr;
+ unsigned long cache_set_size, cache_ways;
+
+ cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+ if (!cpu)
+ panic("No compatible CPU found in device tree...\n");
+
+ iccfgr = mfspr(SPR_ICCFGR);
+ cache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
+ cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
+ cpuinfo.icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
+ cpuinfo.icache_size =
+ cache_set_size * cache_ways * cpuinfo.icache_block_size;
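+	/*
+	 * For example, a 1-way cache with 512 sets and 16-byte lines
+	 * (a typical OR1200 configuration) works out to 512 * 1 * 16 =
+	 * 8192 bytes; the real values come from ICCFGR/DCCFGR above.
+	 */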
+
+ dccfgr = mfspr(SPR_DCCFGR);
+ cache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
+ cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
+ cpuinfo.dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
+ cpuinfo.dcache_size =
+ cache_set_size * cache_ways * cpuinfo.dcache_block_size;
+
+ if (of_property_read_u32(cpu, "clock-frequency",
+ &cpuinfo.clock_frequency)) {
+ printk(KERN_WARNING
+		       "Device tree missing CPU 'clock-frequency' parameter. "
+		       "Assuming frequency 25 MHz. "
+		       "This is probably not what you want.\n");
+ }
+
+ of_node_put(cpu);
+
+ print_cpuinfo();
+}
+
+/**
+ * or32_early_setup
+ *
+ * Handles the pointer to the device tree that this kernel is to use
+ * for establishing the available platform devices.
+ *
+ * For now, this is limited to using the built-in device tree. In the future,
+ * it is intended that this function will take a pointer to the device tree
+ * that is potentially built-in, but potentially also passed in by the
+ * bootloader, or discovered by some equally clever means...
+ */
+
+void __init or32_early_setup(void)
+{
+
+ early_init_devtree(__dtb_start);
+
+ printk(KERN_INFO "Compiled-in FDT at 0x%p\n", __dtb_start);
+}
+
+static int __init openrisc_device_probe(void)
+{
+ of_platform_populate(NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+device_initcall(openrisc_device_probe);
+
+static inline unsigned long extract_value_bits(unsigned long reg,
+ short bit_nr, short width)
+{
+	return (reg >> bit_nr) & ((1 << width) - 1);
+}
+
+static inline unsigned long extract_value(unsigned long reg, unsigned long mask)
+{
+ while (!(mask & 0x1)) {
+ reg = reg >> 1;
+ mask = mask >> 1;
+ }
+ return mask & reg;
+}
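+
+/*
+ * For example, extract_value(0xabcd, 0x00f0) shifts reg and mask right
+ * four times until the mask's LSB is set and then returns
+ * 0x0abc & 0x000f == 0xc, i.e. the field selected by the mask.
+ */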
+
+void __init detect_unit_config(unsigned long upr, unsigned long mask,
+ char *text, void (*func) (void))
+{
+ if (text != NULL)
+ printk("%s", text);
+
+ if (upr & mask) {
+ if (func != NULL)
+ func();
+ else
+ printk("present\n");
+ } else
+ printk("not present\n");
+}
+
+/*
+ * calibrate_delay
+ *
+ * Lightweight calibrate_delay implementation that calculates loops_per_jiffy
+ * from the clock frequency passed in via the device tree
+ *
+ */
+
+void __cpuinit calibrate_delay(void)
+{
+ const int *val;
+ struct device_node *cpu = NULL;
+ cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+ val = of_get_property(cpu, "clock-frequency", NULL);
+ if (!val)
+ panic("no cpu 'clock-frequency' parameter in device tree");
+ loops_per_jiffy = *val / HZ;
+ pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+}
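+
+/*
+ * For example, with a 50 MHz 'clock-frequency' and HZ == 100 the computation
+ * above yields loops_per_jiffy = 500000, which pr_cont() reports as
+ * "100.00 BogoMIPS (lpj=500000)".  (Numbers are illustrative.)
+ */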
+
+void __init setup_arch(char **cmdline_p)
+{
+ unsigned long max_low_pfn;
+
+ unflatten_device_tree();
+
+ setup_cpuinfo();
+
+ /* process 1's initial memory region is the kernel code/data */
+ init_mm.start_code = (unsigned long)&_stext;
+ init_mm.end_code = (unsigned long)&_etext;
+ init_mm.end_data = (unsigned long)&_edata;
+ init_mm.brk = (unsigned long)&_end;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_start = (unsigned long)&__initrd_start;
+ initrd_end = (unsigned long)&__initrd_end;
+ if (initrd_start == initrd_end) {
+ initrd_start = 0;
+ initrd_end = 0;
+ }
+ initrd_below_start_ok = 1;
+#endif
+
+ /* setup bootmem allocator */
+ max_low_pfn = setup_memory();
+
+ /* paging_init() sets up the MMU and marks all pages as reserved */
+ paging_init();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ if (!conswitchp)
+ conswitchp = &dummy_con;
+#endif
+
+ *cmdline_p = cmd_line;
+
+ printk(KERN_INFO "OpenRISC Linux -- http://openrisc.net\n");
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long vr;
+ int version, revision;
+
+ vr = mfspr(SPR_VR);
+ version = (vr & SPR_VR_VER) >> 24;
+ revision = vr & SPR_VR_REV;
+
+ return seq_printf(m,
+ "cpu\t\t: OpenRISC-%x\n"
+ "revision\t: %d\n"
+ "frequency\t: %ld\n"
+ "dcache size\t: %d bytes\n"
+ "dcache block size\t: %d bytes\n"
+ "icache size\t: %d bytes\n"
+ "icache block size\t: %d bytes\n"
+ "immu\t\t: %d entries, %lu ways\n"
+ "dmmu\t\t: %d entries, %lu ways\n"
+ "bogomips\t: %lu.%02lu\n",
+ version,
+ revision,
+ loops_per_jiffy * HZ,
+ cpuinfo.dcache_size,
+ cpuinfo.dcache_block_size,
+ cpuinfo.icache_size,
+ cpuinfo.icache_block_size,
+ 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW),
+ 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW),
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100);
+}
+
+static void *c_start(struct seq_file *m, loff_t * pos)
+{
+ /* We only have one CPU... */
+ return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t * pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
new file mode 100644
index 000000000000..5f759c76834e
--- /dev/null
+++ b/arch/openrisc/kernel/signal.c
@@ -0,0 +1,396 @@
+/*
+ * OpenRISC signal.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/tracehook.h>
+
+#include <asm/processor.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage long
+_sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
+{
+ return do_sigaltstack(uss, uoss, regs->sp);
+}
+
+struct rt_sigframe {
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned char retcode[16]; /* trampoline code */
+};
+
+static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+{
+ unsigned int err = 0;
+ unsigned long old_usp;
+
+	/* Always make any pending restarted system call return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /* restore the regs from &sc->regs (same as sc, since regs is first)
+ * (sc is already checked for VERIFY_READ since the sigframe was
+ * checked in sys_sigreturn previously)
+ */
+
+ if (__copy_from_user(regs, sc, sizeof(struct pt_regs)))
+ goto badframe;
+
+ /* make sure the SM-bit is cleared so user-mode cannot fool us */
+ regs->sr &= ~SPR_SR_SM;
+
+ /* restore the old USP as it was before we stacked the sc etc.
+ * (we cannot just pop the sigcontext since we aligned the sp and
+ * stuff after pushing it)
+ */
+
+ err |= __get_user(old_usp, &sc->usp);
+
+ regs->sp = old_usp;
+
+ /* TODO: the other ports use regs->orig_XX to disable syscall checks
+ * after this completes, but we don't use that mechanism. maybe we can
+ * use it now ?
+ */
+
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe *frame = (struct rt_sigframe __user *)regs->sp;
+ sigset_t set;
+ stack_t st;
+
+ /*
+ * Since we stacked the signal on a dword boundary,
+ * then frame should be dword aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (((long)frame) & 3)
+ goto badframe;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs->sp);
+
+ return regs->gpr[11];
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+ unsigned long usp = regs->sp;
+
+ /* copy the regs. they are first in sc so we can use sc directly */
+
+ err |= __copy_to_user(sc, regs, sizeof(struct pt_regs));
+
+ /* then some other stuff */
+
+ err |= __put_user(mask, &sc->oldmask);
+
+ err |= __put_user(usp, &sc->usp);
+
+ return err;
+}
+
+static inline unsigned long align_sigframe(unsigned long sp)
+{
+ return sp & ~3UL;
+}
+
+/*
+ * Work out where the signal frame should go. It's either on the user stack
+ * or the alternate stack.
+ */
+
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs, size_t frame_size)
+{
+ unsigned long sp = regs->sp;
+ int onsigstack = on_sig_stack(sp);
+
+ /* redzone */
+ sp -= STACK_FRAME_OVERHEAD;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !onsigstack) {
+ if (current->sas_ss_size)
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+ sp = align_sigframe(sp - frame_size);
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (onsigstack && !likely(on_sig_stack(sp)))
+ return (void __user *)-1L;
+
+ return (void __user *)sp;
+}
+
+/* grab and setup a signal frame.
+ *
+ * basically we stack a lot of state info, and arrange for the
+ * user-mode program to return to the kernel using either a
+ * trampoline which performs the syscall sigreturn, or a provided
+ * user-mode trampoline.
+ */
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ unsigned long return_ip;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ err |= copy_siginfo_to_user(&frame->info, info);
+ if (err)
+ goto give_sigsegv;
+
+ /* Clear all the bits of the ucontext we don't use. */
+ err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
+
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ goto give_sigsegv;
+
+ /* trampoline - the desired return ip is the retcode itself */
+ return_ip = (unsigned long)&frame->retcode;
+	/* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop */
+ err |= __put_user(0xa960, (short *)(frame->retcode + 0));
+ err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
+ err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
+ err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
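+	/*
+	 * Note the split stores above: on big-endian OpenRISC, 0xa960 is the
+	 * upper halfword of the l.ori r11,r0,K instruction and
+	 * __NR_rt_sigreturn fills its 16-bit immediate, while 0x20000001 is
+	 * l.sys 1 and 0x15000000 a trailing l.nop.
+	 */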
+
+ if (err)
+ goto give_sigsegv;
+
+ /* TODO what is the current->exec_domain stuff and invmap ? */
+
+ /* Set up registers for signal handler */
+ regs->pc = (unsigned long)ka->sa.sa_handler; /* what we enter NOW */
+ regs->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
+ regs->gpr[3] = (unsigned long)sig; /* arg 1: signo */
+ regs->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
+ regs->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
+
+ /* actually move the usp to reflect the stacked frame */
+ regs->sp = (unsigned long)frame;
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+static inline void
+handle_signal(unsigned long sig,
+ siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+ setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Also note that the regs structure given here as an argument, is the latest
+ * pushed pt_regs. It may or may not be the same as the first pushed registers
+ * when the initial usermode->kernelmode transition took place. Therefore
+ * we can use user_mode(regs) to see if we came directly from kernel or user
+ * mode below.
+ */
+
+void do_signal(struct pt_regs *regs)
+{
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+ /* If we are coming out of a syscall then we need
+ * to check if the syscall was interrupted and wants to be
+ * restarted after handling the signal. If so, the original
+ * syscall number is put back into r11 and the PC rewound to
+ * point at the l.sys instruction that resulted in the
+ * original syscall. Syscall results other than the four
+ * below mean that the syscall executed to completion and no
+ * restart is necessary.
+ */
+ if (regs->syscallno) {
+ int restart = 0;
+
+ switch (regs->gpr[11]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ /* Restart if there is no signal handler */
+ restart = (signr <= 0);
+ break;
+ case -ERESTARTSYS:
+ /* Restart if there no signal handler or
+ * SA_RESTART flag is set */
+ restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
+ break;
+ case -ERESTARTNOINTR:
+ /* Always restart */
+ restart = 1;
+ break;
+ }
+
+ if (restart) {
+ if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
+ regs->gpr[11] = __NR_restart_syscall;
+ else
+ regs->gpr[11] = regs->orig_gpr11;
+ regs->pc -= 4;
+ } else {
+ regs->gpr[11] = -EINTR;
+ }
+ }
+
+ if (signr <= 0) {
+ /* no signal to deliver so we just put the saved sigmask
+ * back */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+
+ } else { /* signr > 0 */
+ sigset_t *oldset;
+
+ if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK)
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, oldset, regs);
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
+ }
+
+ return;
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs)
+{
+ if (current_thread_info()->flags & _TIF_SIGPENDING)
+ do_signal(regs);
+
+ if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
diff --git a/arch/openrisc/kernel/sys_call_table.c b/arch/openrisc/kernel/sys_call_table.c
new file mode 100644
index 000000000000..e1f8ce8c72a8
--- /dev/null
+++ b/arch/openrisc/kernel/sys_call_table.c
@@ -0,0 +1,28 @@
+/*
+ * OpenRISC sys_call_table.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
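+/*
+ * Each __SYSCALL(nr, call) entry pulled in from <asm/unistd.h> below thus
+ * becomes a designated initializer; for example __SYSCALL(__NR_exit, sys_exit)
+ * expands to "[__NR_exit] = (sys_exit)," so the table is indexed directly
+ * by syscall number.
+ */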
+void *sys_call_table[__NR_syscalls] = {
+#include <asm/unistd.h>
+};
diff --git a/arch/openrisc/kernel/sys_or32.c b/arch/openrisc/kernel/sys_or32.c
new file mode 100644
index 000000000000..57060084c0cc
--- /dev/null
+++ b/arch/openrisc/kernel/sys_or32.c
@@ -0,0 +1,57 @@
+/*
+ * OpenRISC sys_or32.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on some platforms.
+ * Since we don't have to do any backwards compatibility, our
+ * versions are done in the most "normal" way possible.
+ */
+
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+
+#include <asm/syscalls.h>
+
+/* These are secondary entry points as the primary entry points are defined in
+ * entry.S where we add the 'regs' parameter value
+ */
+
+asmlinkage long _sys_clone(unsigned long clone_flags, unsigned long newsp,
+ int __user *parent_tid, int __user *child_tid,
+ struct pt_regs *regs)
+{
+ long ret;
+
+ /* FIXME: Is alignment necessary? */
+ /* newsp = ALIGN(newsp, 4); */
+
+ if (!newsp)
+ newsp = regs->sp;
+
+ ret = do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
+
+ return ret;
+}
+
+asmlinkage int _sys_fork(struct pt_regs *regs)
+{
+#ifdef CONFIG_MMU
+ return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
+#else
+ return -EINVAL;
+#endif
+}
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
new file mode 100644
index 000000000000..bd946ef1623d
--- /dev/null
+++ b/arch/openrisc/kernel/time.c
@@ -0,0 +1,181 @@
+/*
+ * OpenRISC time.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/interrupt.h>
+#include <linux/ftrace.h>
+
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/cpuinfo.h>
+
+static int openrisc_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ u32 c;
+
+ /* Read 32-bit counter value, add delta, mask off the low 28 bits.
+ * We're guaranteed delta won't be bigger than 28 bits because the
+ * generic timekeeping code ensures that for us.
+ */
+ c = mfspr(SPR_TTCR);
+ c += delta;
+ c &= SPR_TTMR_TP;
+
+ /* Set counter and enable interrupt.
+ * Keep timer in continuous mode always.
+ */
+ mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c);
+
+ return 0;
+}
+
+static void openrisc_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+		pr_debug("%s: periodic\n", __func__);
+ BUG();
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+		pr_debug("%s: oneshot\n", __func__);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+		pr_debug("%s: unused\n", __func__);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+		pr_debug("%s: shutdown\n", __func__);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+		pr_debug("%s: resume\n", __func__);
+ break;
+ }
+}
+
+/* This is the clock event device based on the OR1K tick timer.
+ * As the timer is being used as a continuous clock-source (required for HR
+ * timers) we cannot enable the PERIODIC feature. The tick timer can run using
+ * one-shot events, so no problem.
+ */
+
+static struct clock_event_device clockevent_openrisc_timer = {
+ .name = "openrisc_timer_clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 300,
+ .set_next_event = openrisc_timer_set_next_event,
+ .set_mode = openrisc_timer_set_mode,
+};
+
+static inline void timer_ack(void)
+{
+ /* Clear the IP bit and disable further interrupts */
+ /* This can be done very simply... we just need to keep the timer
+ running, so just maintain the CR bits while clearing the rest
+ of the register
+ */
+ mtspr(SPR_TTMR, SPR_TTMR_CR);
+}
+
+/*
+ * The timer interrupt is mostly handled in generic code nowadays... this
+ * function just acknowledges the interrupt and fires the event handler that
+ * has been set on the clockevent device by the generic time management code.
+ *
+ * This function needs to be called by the timer exception handler and that's
+ * all the exception handler needs to do.
+ */
+
+irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct clock_event_device *evt = &clockevent_openrisc_timer;
+
+ timer_ack();
+
+ /*
+ * update_process_times() expects us to have called irq_enter().
+ */
+ irq_enter();
+ evt->event_handler(evt);
+ irq_exit();
+
+ set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
+}
+
+static __init void openrisc_clockevent_init(void)
+{
+ clockevents_calc_mult_shift(&clockevent_openrisc_timer,
+ cpuinfo.clock_frequency, 4);
+
+ /* We only have 28 bits */
+ clockevent_openrisc_timer.max_delta_ns =
+ clockevent_delta2ns((u32) 0x0fffffff, &clockevent_openrisc_timer);
+ clockevent_openrisc_timer.min_delta_ns =
+ clockevent_delta2ns(1, &clockevent_openrisc_timer);
+ clockevent_openrisc_timer.cpumask = cpumask_of(0);
+ clockevents_register_device(&clockevent_openrisc_timer);
+}
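+
+/*
+ * For example, with a 50 MHz tick timer the 28-bit limit above gives a
+ * max_delta_ns of roughly 0x0fffffff / 50 MHz ~= 5.4 seconds, and a
+ * min_delta_ns of one timer cycle (20 ns).  (Clock rate is illustrative.)
+ */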
+
+/**
+ * Clocksource: Based on OpenRISC timer/counter
+ *
+ * This sets up the OpenRISC Tick Timer as a clock source. The tick timer
+ * is 32 bits wide and runs at the CPU clock frequency.
+ */
+
+static cycle_t openrisc_timer_read(struct clocksource *cs)
+{
+ return (cycle_t) mfspr(SPR_TTCR);
+}
+
+static struct clocksource openrisc_timer = {
+ .name = "openrisc_timer",
+ .rating = 200,
+ .read = openrisc_timer_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init openrisc_timer_init(void)
+{
+ if (clocksource_register_hz(&openrisc_timer, cpuinfo.clock_frequency))
+ panic("failed to register clocksource");
+
+ /* Enable the incrementer: 'continuous' mode with interrupt disabled */
+ mtspr(SPR_TTMR, SPR_TTMR_CR);
+
+ return 0;
+}
+
+void __init time_init(void)
+{
+ u32 upr;
+
+ upr = mfspr(SPR_UPR);
+ if (!(upr & SPR_UPR_TTP))
+ panic("Linux not supported on devices without tick timer");
+
+ openrisc_timer_init();
+ openrisc_clockevent_init();
+}
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
new file mode 100644
index 000000000000..a4ec44a052b2
--- /dev/null
+++ b/arch/openrisc/kernel/traps.c
@@ -0,0 +1,366 @@
+/*
+ * OpenRISC traps.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Here we handle the break vectors not used by the system call
+ * mechanism, as well as some general stack/register dumping
+ * things.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/kallsyms.h>
+#include <asm/uaccess.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+extern char _etext, _stext;
+
+int kstack_depth_to_print = 0x180;
+
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+{
+ return p > (void *)tinfo && p < (void *)tinfo + THREAD_SIZE - 3;
+}
+
+void show_trace(struct task_struct *task, unsigned long *stack)
+{
+ struct thread_info *context;
+ unsigned long addr;
+
+ context = (struct thread_info *)
+ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+
+ while (valid_stack_ptr(context, stack)) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ printk(" [<%08lx>]", addr);
+ print_symbol(" %s", addr);
+ printk("\n");
+ }
+ }
+ printk(" =======================\n");
+}
+
+/* displays a short stack trace */
+void show_stack(struct task_struct *task, unsigned long *esp)
+{
+ unsigned long addr, *stack;
+ int i;
+
+ if (esp == NULL)
+ esp = (unsigned long *)&esp;
+
+ stack = esp;
+
+ printk("Stack dump [0x%08lx]:\n", (unsigned long)esp);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (kstack_end(stack))
+ break;
+ if (__get_user(addr, stack)) {
+ /* This message matches "failing address" marked
+ s390 in ksymoops, so lines containing it will
+ not be filtered out by ksymoops. */
+ printk("Failing address 0x%lx\n", (unsigned long)stack);
+ break;
+ }
+ stack++;
+
+ printk("sp + %02d: 0x%08lx\n", i * 4, addr);
+ }
+ printk("\n");
+
+ show_trace(task, esp);
+
+ return;
+}
+
+void show_trace_task(struct task_struct *tsk)
+{
+ /*
+ * TODO: SysRq-T trace dump...
+ */
+}
+
+/*
+ * The architecture-independent backtrace generator
+ */
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_stack(current, &stack);
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ int i;
+ int in_kernel = 1;
+ unsigned long esp;
+
+ esp = (unsigned long)(&regs->sp);
+ if (user_mode(regs))
+ in_kernel = 0;
+
+ printk("CPU #: %d\n"
+ " PC: %08lx SR: %08lx SP: %08lx\n",
+ smp_processor_id(), regs->pc, regs->sr, regs->sp);
+ printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
+ 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
+ printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
+ regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
+ printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
+ regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
+ printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
+ regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
+ printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
+ regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
+ printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
+ regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
+ printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
+ regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
+ printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
+ regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
+ printk(" RES: %08lx oGPR11: %08lx syscallno: %08lx\n",
+ regs->gpr[11], regs->orig_gpr11, regs->syscallno);
+
+ printk("Process %s (pid: %d, stackpage=%08lx)\n",
+ current->comm, current->pid, (unsigned long)current);
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+ if (in_kernel) {
+
+ printk("\nStack: ");
+ show_stack(NULL, (unsigned long *)esp);
+
+ printk("\nCode: ");
+ if (regs->pc < PAGE_OFFSET)
+ goto bad;
+
+ for (i = -24; i < 24; i++) {
+ unsigned char c;
+ if (__get_user(c, &((unsigned char *)regs->pc)[i])) {
+bad:
+ printk(" Bad PC value.");
+ break;
+ }
+
+ if (i == 0)
+ printk("(%02x) ", c);
+ else
+ printk("%02x ", c);
+ }
+ }
+ printk("\n");
+}
+
+void nommu_dump_state(struct pt_regs *regs,
+ unsigned long ea, unsigned long vector)
+{
+ int i;
+ unsigned long addr, stack = regs->sp;
+
+ printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector);
+
+ printk("CPU #: %d\n"
+ " PC: %08lx SR: %08lx SP: %08lx\n",
+ 0, regs->pc, regs->sr, regs->sp);
+ printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
+ 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
+ printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
+ regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
+ printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
+ regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
+ printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
+ regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
+ printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
+ regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
+ printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
+ regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
+ printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
+ regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
+ printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
+ regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
+ printk(" RES: %08lx oGPR11: %08lx syscallno: %08lx\n",
+ regs->gpr[11], regs->orig_gpr11, regs->syscallno);
+
+ printk("Process %s (pid: %d, stackpage=%08lx)\n",
+ ((struct task_struct *)(__pa(current)))->comm,
+ ((struct task_struct *)(__pa(current)))->pid,
+ (unsigned long)current);
+
+ printk("\nStack: ");
+ printk("Stack dump [0x%08lx]:\n", (unsigned long)stack);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (((long)stack & (THREAD_SIZE - 1)) == 0)
+ break;
+ stack++;
+
+ printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4,
+ *((unsigned long *)(__pa(stack))));
+ }
+ printk("\n");
+
+ printk("Call Trace: ");
+ i = 1;
+ while (((long)stack & (THREAD_SIZE - 1)) != 0) {
+ addr = *((unsigned long *)__pa(stack));
+ stack++;
+
+ if (kernel_text_address(addr)) {
+ if (i && ((i % 6) == 0))
+ printk("\n ");
+ printk(" [<%08lx>]", addr);
+ i++;
+ }
+ }
+ printk("\n");
+
+ printk("\nCode: ");
+
+ for (i = -24; i < 24; i++) {
+ unsigned char c;
+ c = ((unsigned char *)(__pa(regs->pc)))[i];
+
+ if (i == 0)
+ printk("(%02x) ", c);
+ else
+ printk("%02x ", c);
+ }
+ printk("\n");
+}
+
+/* This is normally the 'Oops' routine */
+void die(const char *str, struct pt_regs *regs, long err)
+{
+
+ console_verbose();
+ printk("\n%s#: %04lx\n", str, err & 0xffff);
+ show_registers(regs);
+#ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
+ printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n");
+
+ /* shut down interrupts */
+ local_irq_disable();
+
+ __asm__ __volatile__("l.nop 1");
+ do {} while (1);
+#endif
+ do_exit(SIGSEGV);
+}
+
+/* This is normally the 'Oops' routine */
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+ if (user_mode(regs))
+ return;
+
+ die(str, regs, err);
+}
+
+void unhandled_exception(struct pt_regs *regs, int ea, int vector)
+{
+	printk("Unable to handle exception at EA = 0x%x, vector 0x%x\n",
+	       ea, vector);
+ die("Oops", regs, 9);
+}
+
+void __init trap_init(void)
+{
+ /* Nothing needs to be done */
+}
+
+asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
+{
+ siginfo_t info;
+ memset(&info, 0, sizeof(info));
+ info.si_signo = SIGTRAP;
+ info.si_code = TRAP_TRACE;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGTRAP, &info, current);
+
+ regs->pc += 4;
+}
+
+asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
+{
+ siginfo_t info;
+
+ if (user_mode(regs)) {
+ /* Send a SIGSEGV */
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+		info.si_code = SEGV_MAPERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, current);
+ } else {
+ printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
+ show_registers(regs);
+ die("Die:", regs, address);
+ }
+
+}
+
+asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address)
+{
+ siginfo_t info;
+
+ if (user_mode(regs)) {
+ /* Send a SIGBUS */
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, current);
+ } else { /* Kernel mode */
+ printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address);
+ show_registers(regs);
+ die("Die:", regs, address);
+ }
+}
+
+asmlinkage void do_illegal_instruction(struct pt_regs *regs,
+ unsigned long address)
+{
+ siginfo_t info;
+
+ if (user_mode(regs)) {
+ /* Send a SIGILL */
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = (void *)address;
+		force_sig_info(SIGILL, &info, current);
+ } else { /* Kernel mode */
+ printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n",
+ address);
+ show_registers(regs);
+ die("Die:", regs, address);
+ }
+}
diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h
new file mode 100644
index 000000000000..ee842a2d3f36
--- /dev/null
+++ b/arch/openrisc/kernel/vmlinux.h
@@ -0,0 +1,12 @@
+#ifndef __OPENRISC_VMLINUX_H_
+#define __OPENRISC_VMLINUX_H_
+
+extern char _stext, _etext, _edata, _end;
+#ifdef CONFIG_BLK_DEV_INITRD
+extern char __initrd_start, __initrd_end;
+extern char __initramfs_start;
+#endif
+
+extern u32 __dtb_start[];
+
+#endif
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..2d69a853b742
--- /dev/null
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -0,0 +1,115 @@
+/*
+ * OpenRISC vmlinux.lds.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * ld script for OpenRISC architecture
+ */
+
+/* TODO
+ * - clean up __offset & stuff
+ * - change all 8192 alignment to PAGE !!!
+ * - recheck if all alignments are really needed
+ */
+
+# define LOAD_OFFSET PAGE_OFFSET
+# define LOAD_BASE PAGE_OFFSET
+
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-or32", "elf32-or32", "elf32-or32")
+jiffies = jiffies_64 + 4;
+
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = LOAD_BASE ;
+
+ /* _s_kernel_ro must be page aligned */
+ . = ALIGN(PAGE_SIZE);
+ _s_kernel_ro = .;
+
+ .text : AT(ADDR(.text) - LOAD_OFFSET)
+ {
+ _stext = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.text.__*)
+ _etext = .;
+ }
+ /* TODO: Check if fixup and text.__* are really necessary
+ * fixup is definitely necessary
+ */
+
+ _sdata = .;
+
+ /* Page alignment required for RO_DATA_SECTION */
+ RO_DATA_SECTION(PAGE_SIZE)
+ _e_kernel_ro = .;
+
+	/* Whatever comes after _e_kernel_ro had better be page-aligned, too */
+
+ /* 32 here is cacheline size... recheck this */
+ RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE)
+
+ _edata = .;
+
+ EXCEPTION_TABLE(4)
+ NOTES
+
+ /* Init code and data */
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+
+ HEAD_TEXT_SECTION
+
+ /* Page aligned */
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
+ /* Align __setup_start on 16 byte boundary */
+ INIT_DATA_SECTION(16)
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ __init_end = .;
+
+ . = ALIGN(PAGE_SIZE);
+ .initrd : AT(ADDR(.initrd) - LOAD_OFFSET)
+ {
+ __initrd_start = .;
+ *(.initrd)
+ __initrd_end = .;
+ FILL (0);
+ . = ALIGN (PAGE_SIZE);
+ }
+
+ __vmlinux_end = .; /* last address of the physical file */
+
+ BSS_SECTION(0, 0, 0x20)
+
+ _end = .;
+
+ /* Throw in the debugging sections */
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ /* Sections to be discarded -- must be last */
+ DISCARDS
+}
diff --git a/arch/openrisc/lib/Makefile b/arch/openrisc/lib/Makefile
new file mode 100644
index 000000000000..966f65dbc6f0
--- /dev/null
+++ b/arch/openrisc/lib/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for or32 specific library files..
+#
+
+obj-y = string.o delay.o
diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c
new file mode 100644
index 000000000000..01d9740ae6f3
--- /dev/null
+++ b/arch/openrisc/lib/delay.c
@@ -0,0 +1,60 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation
+ *
+ * Precise Delay Loops
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/delay.h>
+#include <asm/timex.h>
+#include <asm/processor.h>
+
+int __devinit read_current_timer(unsigned long *timer_value)
+{
+ *timer_value = mfspr(SPR_TTCR);
+ return 0;
+}
+
+void __delay(unsigned long cycles)
+{
+ cycles_t target = get_cycles() + cycles;
+
+ while (get_cycles() < target)
+ cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+inline void __const_udelay(unsigned long xloops)
+{
+ unsigned long long loops;
+
+ loops = xloops * loops_per_jiffy * HZ;
+
+ __delay(loops >> 32);
+}
+EXPORT_SYMBOL(__const_udelay);
+
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
+}
+EXPORT_SYMBOL(__udelay);
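+
+/*
+ * Worked example of the fixed-point arithmetic above: 0x10C7 is 2^32 / 10^6
+ * rounded up, so __udelay(100) on a 50 MHz clock (loops_per_jiffy * HZ ==
+ * 50,000,000) computes (100 * 0x10C7 * 50,000,000) >> 32 ~= 5000 timer
+ * cycles, i.e. 100 us.  (Clock rate chosen for illustration.)
+ */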
+
+void __ndelay(unsigned long nsecs)
+{
+ __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
+}
+EXPORT_SYMBOL(__ndelay);
diff --git a/arch/openrisc/lib/string.S b/arch/openrisc/lib/string.S
new file mode 100644
index 000000000000..465f04bc7deb
--- /dev/null
+++ b/arch/openrisc/lib/string.S
@@ -0,0 +1,204 @@
+/*
+ * OpenRISC string.S
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+
+ /*
+	 * this can be optimized by doing gcc inline assembly with
+ * proper constraints (no need to save args registers...)
+ *
+ */
+
+
+/*
+ *
+ * int __copy_tofrom_user(void *to, const void *from, unsigned long size);
+ *
+ * NOTE: it returns the number of bytes NOT copied !!!
+ *
+ */
+ .global __copy_tofrom_user
+__copy_tofrom_user:
+ l.addi r1,r1,-12
+ l.sw 0(r1),r6
+ l.sw 4(r1),r4
+ l.sw 8(r1),r3
+
+ l.addi r11,r5,0
+2: l.sfeq r11,r0
+ l.bf 1f
+ l.addi r11,r11,-1
+8: l.lbz r6,0(r4)
+9: l.sb 0(r3),r6
+ l.addi r3,r3,1
+ l.j 2b
+ l.addi r4,r4,1
+1:
+ l.addi r11,r11,1 // r11 holds the return value
+
+ l.lwz r6,0(r1)
+ l.lwz r4,4(r1)
+ l.lwz r3,8(r1)
+ l.jr r9
+ l.addi r1,r1,12
+
+ .section .fixup, "ax"
+99:
+ l.j 1b
+ l.nop
+ .previous
+
+ .section __ex_table, "a"
+ .long 8b, 99b // read fault
+ .long 9b, 99b // write fault
+ .previous
+
+/*
+ * unsigned long clear_user(void *addr, unsigned long size) ;
+ *
+ * NOTE: it returns the number of bytes NOT cleared !!!
+ */
+ .global __clear_user
+__clear_user:
+ l.addi r1,r1,-8
+ l.sw 0(r1),r4
+ l.sw 4(r1),r3
+
+2: l.sfeq r4,r0
+ l.bf 1f
+ l.addi r4,r4,-1
+9: l.sb 0(r3),r0
+ l.j 2b
+ l.addi r3,r3,1
+
+1:
+ l.addi r11,r4,1
+
+ l.lwz r4,0(r1)
+ l.lwz r3,4(r1)
+ l.jr r9
+ l.addi r1,r1,8
+
+ .section .fixup, "ax"
+99:
+ l.j 1b
+ l.nop
+ .previous
+
+ .section __ex_table, "a"
+ .long 9b, 99b // write fault
+ .previous
+
+/*
+ * long strncpy_from_user(char *dst, const char *src, long count)
+ *
+ *
+ */
+ .global __strncpy_from_user
+__strncpy_from_user:
+ l.addi r1,r1,-16
+ l.sw 0(r1),r6
+ l.sw 4(r1),r5
+ l.sw 8(r1),r4
+ l.sw 12(r1),r3
+
+ l.addi r11,r5,0
+2: l.sfeq r5,r0
+ l.bf 1f
+ l.addi r5,r5,-1
+8: l.lbz r6,0(r4)
+ l.sfeq r6,r0
+ l.bf 1f
+9: l.sb 0(r3),r6
+ l.addi r3,r3,1
+ l.j 2b
+ l.addi r4,r4,1
+1:
+ l.lwz r6,0(r1)
+ l.addi r5,r5,1
+ l.sub r11,r11,r5 // r11 holds the return value
+
+ l.lwz r6,0(r1)
+ l.lwz r5,4(r1)
+ l.lwz r4,8(r1)
+ l.lwz r3,12(r1)
+ l.jr r9
+ l.addi r1,r1,16
+
+ .section .fixup, "ax"
+99:
+ l.movhi r11,hi(-EFAULT)
+ l.ori r11,r11,lo(-EFAULT)
+
+ l.lwz r6,0(r1)
+ l.lwz r5,4(r1)
+ l.lwz r4,8(r1)
+ l.lwz r3,12(r1)
+ l.jr r9
+ l.addi r1,r1,16
+ .previous
+
+ .section __ex_table, "a"
+ .long 8b, 99b // read fault
+ .previous
+
+/*
+ * extern int __strnlen_user(const char *str, long len, unsigned long top);
+ *
+ *
+ * Returns:	- length of the string including the NUL termination character
+ *		- 0 on a page fault
+ */
+
+ .global __strnlen_user
+__strnlen_user:
+ l.addi r1,r1,-8
+ l.sw 0(r1),r6
+ l.sw 4(r1),r3
+
+ l.addi r11,r0,0
+2: l.sfeq r11,r4
+ l.bf 1f
+ l.addi r11,r11,1
+8: l.lbz r6,0(r3)
+ l.sfeq r6,r0
+ l.bf 1f
+ l.sfgeu r3,r5 // are we over the top ?
+ l.bf 99f
+ l.j 2b
+ l.addi r3,r3,1
+
+1:
+ l.lwz r6,0(r1)
+ l.lwz r3,4(r1)
+ l.jr r9
+ l.addi r1,r1,8
+
+ .section .fixup, "ax"
+99:
+ l.addi r11,r0,0
+
+ l.lwz r6,0(r1)
+ l.lwz r3,4(r1)
+ l.jr r9
+ l.addi r1,r1,8
+ .previous
+
+ .section __ex_table, "a"
+ .long 8b, 99b // read fault
+ .previous
diff --git a/arch/openrisc/mm/Makefile b/arch/openrisc/mm/Makefile
new file mode 100644
index 000000000000..324ba2634529
--- /dev/null
+++ b/arch/openrisc/mm/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux openrisc-specific parts of the memory manager.
+#
+
+obj-y := fault.o tlb.o init.o ioremap.o
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
new file mode 100644
index 000000000000..a5dce82f864b
--- /dev/null
+++ b/arch/openrisc/mm/fault.c
@@ -0,0 +1,338 @@
+/*
+ * OpenRISC fault.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/uaccess.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#define NUM_TLB_ENTRIES 64
+#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
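+/*
+ * For example, assuming 8 KiB pages (PAGE_SHIFT == 13),
+ * TLB_OFFSET(0xc0002000) is (0xc0002000 >> 13) & 63 == 1, i.e. the
+ * second of the 64 TLB sets.
+ */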
+
+unsigned long pte_misses; /* updated by do_page_fault() */
+unsigned long pte_errors; /* updated by do_page_fault() */
+
+/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
+ * - also look into include/asm-or32/mmu_context.h
+ */
+volatile pgd_t *current_pgd;
+
+extern void die(char *, struct pt_regs *, long);
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * If this routine detects a bad access, it returns 1, otherwise it
+ * returns 0.
+ */
+
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long vector, int write_acc)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ siginfo_t info;
+ int fault;
+
+ tsk = current;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * NOTE2: This is done so that, when updating the vmalloc
+ * mappings we don't have to walk all processes pgdirs and
+ * add the high mappings all at once. Instead we do it as they
+ * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
+ * bit set so sometimes the TLB can use a lingering entry.
+ *
+ * This verifies that the fault happens in kernel space
+ * and that the fault was not a protection error.
+ */
+
+ if (address >= VMALLOC_START &&
+ (vector != 0x300 && vector != 0x400) &&
+ !user_mode(regs))
+ goto vmalloc_fault;
+
+ /* If exceptions were enabled, we can reenable them here */
+ if (user_mode(regs)) {
+ /* Exception was in userspace: reenable interrupts */
+ local_irq_enable();
+ } else {
+ /* If exception was in a syscall, then IRQ's may have
+ * been enabled or disabled. If they were enabled,
+ * reenable them.
+ */
+ if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
+ local_irq_enable();
+ }
+
+ mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+
+ if (!vma)
+ goto bad_area;
+
+ if (vma->vm_start <= address)
+ goto good_area;
+
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+
+ if (user_mode(regs)) {
+ /*
+ * accessing the stack below usp is always a bug.
+ * we get page-aligned addresses so we can only check
+ * if we're within a page from usp, but that might be
+ * enough to catch brutal errors at least.
+ */
+ if (address + PAGE_SIZE < regs->sp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ /* first do some preliminary protection checks */
+
+ if (write_acc) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ /* not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ /* are we trying to execute a nonexecutable area? */
+ if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
+ goto bad_area;
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+
+ fault = handle_mm_fault(mm, vma, address, write_acc);
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+ /*RGD modeled on Cris */
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
+ up_read(&mm->mmap_sem);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+
+ /* User mode accesses just cause a SIGSEGV */
+
+ if (user_mode(regs)) {
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+
+ /* Are we prepared to handle this kernel fault?
+ *
+ * (The kernel has valid exception-points in the source
+ * when it accesses user memory. When it fails in one
+ * of those points, we find it in a table and do a jump
+ * to some fixup code that loads an appropriate error
+ * code)
+ */
+
+ {
+ const struct exception_table_entry *entry;
+
+ __asm__ __volatile__("l.nop 42");
+
+ if ((entry = search_exception_tables(regs->pc)) != NULL) {
+ /* Adjust the instruction pointer in the stackframe */
+ regs->pc = entry->fixup;
+ return;
+ }
+ }
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ if ((unsigned long)(address) < PAGE_SIZE)
+ printk(KERN_ALERT
+ "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel access");
+ printk(" at virtual address 0x%08lx\n", address);
+
+ die("Oops", regs, write_acc);
+
+ do_exit(SIGKILL);
+
+ /*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+
+out_of_memory:
+ __asm__ __volatile__("l.nop 42");
+ __asm__ __volatile__("l.nop 1");
+
+ up_read(&mm->mmap_sem);
+ printk("VM: killing process %s\n", tsk->comm);
+ if (user_mode(regs))
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+
+vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Use current_pgd instead of tsk->active_mm->pgd
+ * since the latter might be unavailable if this
+ * code is executed in a misfortunately run irq
+ * (like inside schedule() between switch_mm and
+ * switch_to...).
+ */
+
+ int offset = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
+/*
+ phx_warn("do_page_fault(): vmalloc_fault will not work, "
+ "since current_pgd assign a proper value somewhere\n"
+ "anyhow we don't need this at the moment\n");
+
+ phx_mmu("vmalloc_fault");
+*/
+ pgd = (pgd_t *)current_pgd + offset;
+ pgd_k = init_mm.pgd + offset;
+
+ /* Since we're two-level, we don't need to do both
+ * set_pgd and set_pmd (they do the same thing). If
+ * we go three-level at some point, do the right thing
+ * with pgd_present and set_pgd here.
+ *
+ * Also, since the vmalloc area is global, we don't
+ * need to copy individual PTE's, it is enough to
+ * copy the pgd pointer into the pte page of the
+ * root task. If that is there, we'll find our pte if
+ * it exists.
+ */
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+
+ if (!pmd_present(*pmd_k))
+ goto bad_area_nosemaphore;
+
+ set_pmd(pmd, *pmd_k);
+
+ /* Make sure the actual PTE exists as well to
+ * catch kernel vmalloc-area accesses to non-mapped
+ * addresses. If we don't do this, this will just
+ * silently loop forever.
+ */
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+
+ return;
+ }
+}
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
new file mode 100644
index 000000000000..359dcb20fe85
--- /dev/null
+++ b/arch/openrisc/mm/init.c
@@ -0,0 +1,283 @@
+/*
+ * OpenRISC init.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h> /* for initrd_* */
+#include <linux/pagemap.h>
+#include <linux/memblock.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+
+int mem_init_done;
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static void __init zone_sizes_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES];
+
+ /* Clear the zone sizes */
+ memset(zones_size, 0, sizeof(zones_size));
+
+ /*
+ * We use only ZONE_NORMAL
+ */
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(zones_size);
+}
+
+extern const char _s_kernel_ro[], _e_kernel_ro[];
+
+/*
+ * Map all physical memory into kernel's address space.
+ *
+ * This is explicitly coded for two-level page tables, so if you need
+ * something else then this needs to change.
+ */
+static void __init map_ram(void)
+{
+ unsigned long v, p, e;
+ pgprot_t prot;
+ pgd_t *pge;
+ pud_t *pue;
+ pmd_t *pme;
+ pte_t *pte;
+ /* These mark extents of read-only kernel pages...
+ * ...from vmlinux.lds.S
+ */
+ struct memblock_region *region;
+
+ v = PAGE_OFFSET;
+
+ for_each_memblock(memory, region) {
+ p = (u32) region->base & PAGE_MASK;
+ e = p + (u32) region->size;
+
+ v = (u32) __va(p);
+ pge = pgd_offset_k(v);
+
+ while (p < e) {
+ int j;
+ pue = pud_offset(pge, v);
+ pme = pmd_offset(pue, v);
+
+ if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
+ panic("%s: OR1K kernel hardcoded for "
+ "two-level page tables",
+ __func__);
+ }
+
+ /* Alloc one page for holding PTE's... */
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
+
+ /* Fill the newly allocated page with PTEs */
+ for (j = 0; p < e && j < PTRS_PER_PGD;
+ v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
+ if (v >= (u32) _e_kernel_ro ||
+ v < (u32) _s_kernel_ro)
+ prot = PAGE_KERNEL;
+ else
+ prot = PAGE_KERNEL_RO;
+
+ set_pte(pte, mk_pte_phys(p, prot));
+ }
+
+ pge++;
+ }
+
+ printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
+ region->base, region->base + region->size);
+ }
+}
+
+void __init paging_init(void)
+{
+ extern void tlb_init(void);
+
+ unsigned long end;
+ int i;
+
+ printk(KERN_INFO "Setting up paging and PTEs.\n");
+
+ /* clear out the init_mm.pgd that will contain the kernel's mappings */
+
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ swapper_pg_dir[i] = __pgd(0);
+
+ /* make sure the current pgd table points to something sane
+ * (even if it is most probably not used until the next
+ * switch_mm)
+ */
+ current_pgd = init_mm.pgd;
+
+ end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
+
+ map_ram();
+
+ zone_sizes_init();
+
+ /* self modifying code ;) */
+ /* Since the old TLB miss handler has been running up until now,
+ * the kernel pages are still all RW, so we can still modify the
+ * text directly... after this change and a TLB flush, the kernel
+ * pages will become RO.
+ */
+ {
+ extern unsigned long dtlb_miss_handler;
+ extern unsigned long itlb_miss_handler;
+
+ unsigned long *dtlb_vector = __va(0x900);
+ unsigned long *itlb_vector = __va(0xa00);
+
+ printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
+ *dtlb_vector = ((unsigned long)&dtlb_miss_handler -
+ (unsigned long)dtlb_vector) >> 2;
+
+ printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
+ *itlb_vector = ((unsigned long)&itlb_miss_handler -
+ (unsigned long)itlb_vector) >> 2;
+ }
+
+ /* Invalidate instruction caches after code modification */
+ mtspr(SPR_ICBIR, 0x900);
+ mtspr(SPR_ICBIR, 0xa00);
+
+ /* New TLB miss handlers and kernel page tables are now in place.
+ * Make sure that page flags get updated for all pages in TLB by
+ * flushing the TLB and forcing all TLB entries to be recreated
+ * from their page table flags.
+ */
+ flush_tlb_all();
+}
+
+/* References to section boundaries */
+
+extern char _stext, _etext, _edata, __bss_start, _end;
+extern char __init_begin, __init_end;
+
+static int __init free_pages_init(void)
+{
+ int reservedpages, pfn;
+
+ /* this will put all low memory onto the freelists */
+ totalram_pages = free_all_bootmem();
+
+ reservedpages = 0;
+ for (pfn = 0; pfn < max_low_pfn; pfn++) {
+ /*
+ * Only count reserved RAM pages
+ */
+ if (PageReserved(mem_map + pfn))
+ reservedpages++;
+ }
+
+ return reservedpages;
+}
+
+static void __init set_max_mapnr_init(void)
+{
+ max_mapnr = num_physpages = max_low_pfn;
+}
+
+void __init mem_init(void)
+{
+ int codesize, reservedpages, datasize, initsize;
+
+ if (!mem_map)
+ BUG();
+
+ set_max_mapnr_init();
+
+ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+
+ /* clear the zero-page */
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+ reservedpages = free_pages_init();
+
+ codesize = (unsigned long)&_etext - (unsigned long)&_stext;
+ datasize = (unsigned long)&_edata - (unsigned long)&_etext;
+ initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
+
+ printk(KERN_INFO
+ "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+ (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
+ max_mapnr << (PAGE_SHIFT - 10), codesize >> 10,
+ reservedpages << (PAGE_SHIFT - 10), datasize >> 10,
+ initsize >> 10, (unsigned long)(0 << (PAGE_SHIFT - 10))
+ );
+
+ printk("mem_init_done ...........................................\n");
+ mem_init_done = 1;
+ return;
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
+ (end - start) >> 10);
+
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ init_page_count(virt_to_page(start));
+ free_page(start);
+ totalram_pages++;
+ }
+}
+#endif
+
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10);
+}
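A non-obvious detail in paging_init() above is why storing "(handler - vector) >> 2" at the exception vector produces a working branch. A minimal sketch of the idea, assuming the OR1K l.j encoding (opcode 0x0 in the top six bits, signed 26-bit PC-relative word offset), so that a small positive offset word is itself a valid "l.j handler" instruction:

static void patch_vector_with_jump(unsigned long *vector, void *handler)
{
	unsigned long words = ((unsigned long)handler -
			       (unsigned long)vector) >> 2;

	*vector = words;	/* top six bits are zero => l.j opcode */
}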
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
new file mode 100644
index 000000000000..62b08ef392be
--- /dev/null
+++ b/arch/openrisc/mm/ioremap.c
@@ -0,0 +1,137 @@
+/*
+ * OpenRISC ioremap.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <asm/pgalloc.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/bug.h>
+#include <asm/pgtable.h>
+#include <linux/sched.h>
+#include <asm/tlbflush.h>
+
+extern int mem_init_done;
+
+static unsigned int fixmaps_used __initdata;
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *__init_refok
+__ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot)
+{
+ phys_addr_t p;
+ unsigned long v;
+ unsigned long offset, last_addr;
+ struct vm_struct *area = NULL;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
+ return NULL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = addr & ~PAGE_MASK;
+ p = addr & PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - p;
+
+ if (likely(mem_init_done)) {
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ v = (unsigned long)area->addr;
+ } else {
+ if ((fixmaps_used + (size >> PAGE_SHIFT)) > FIX_N_IOREMAPS)
+ return NULL;
+ v = fix_to_virt(FIX_IOREMAP_BEGIN + fixmaps_used);
+ fixmaps_used += (size >> PAGE_SHIFT);
+ }
+
+ if (ioremap_page_range(v, v + size, p, prot)) {
+ if (likely(mem_init_done))
+ vfree(area->addr);
+ else
+ fixmaps_used -= (size >> PAGE_SHIFT);
+ return NULL;
+ }
+
+ return (void __iomem *)(offset + (char *)v);
+}
+
+void iounmap(void *addr)
+{
+ /* If the page is from the fixmap pool then we just clear out
+ * the fixmap mapping.
+ */
+ if (unlikely((unsigned long)addr > FIXADDR_START)) {
+ /* This is a bit broken... we don't really know
+ * how big the area is so it's difficult to know
+ * how many fixed pages to invalidate...
+ * just flush tlb and hope for the best...
+ * consider this a FIXME
+ *
+ * Really we should be clearing out one or more page
+ * table entries for these virtual addresses so that
+ * future references cause a page fault... for now, we
+ * rely on two things:
+ * i) this code never gets called on known boards
+ * ii) invalid accesses to the freed areas aren't made
+ */
+ flush_tlb_all();
+ return;
+ }
+
+ return vfree((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
+/**
+ * OK, this one's a bit tricky... ioremap can get called before memory is
+ * initialized (early serial console does this) and will want to alloc a page
+ * for its mapping. No userspace pages will ever get allocated before memory
+ * is initialized so this applies only to kernel pages. In the event that
+ * this is called before memory is initialized we allocate the page using
+ * the memblock infrastructure.
+ */
+
+pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ if (likely(mem_init_done)) {
+ pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+ } else {
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+#if 0
+ /* FIXME: use memblock... */
+ pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+#endif
+ }
+
+ if (pte)
+ clear_page(pte);
+ return pte;
+}
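The NOTE in __ioremap() above about non-page-aligned mappings is easiest to see from the caller's side. A hypothetical usage sketch (the physical address, size and pgprot are made-up values, not taken from this patch):

	pgprot_t prot = PAGE_KERNEL;	/* placeholder; a real device mapping would use an uncached pgprot */
	void __iomem *regs;

	regs = __ioremap(0x91000004, 0x20, prot);	/* deliberately not page aligned */
	if (regs) {
		/* regs keeps the 0x...004 sub-page offset of the physical address */
		pr_info("id: %08x\n", ioread32(regs));
		iounmap((void *)regs);
	}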
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
new file mode 100644
index 000000000000..56b0b89624af
--- /dev/null
+++ b/arch/openrisc/mm/tlb.c
@@ -0,0 +1,193 @@
+/*
+ * OpenRISC tlb.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/spr_defs.h>
+
+#define NO_CONTEXT -1
+
+#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
+ SPR_DMMUCFGR_NTS_OFF))
+#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
+ SPR_IMMUCFGR_NTS_OFF))
+#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
+#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
+/*
+ * Invalidate all TLB entries.
+ *
+ * This comes down to clearing the 'valid' bit in all xTLBMR registers.
+ * Easiest way to accomplish this is to just zero out the xTLBMR register
+ * completely.
+ *
+ */
+
+void flush_tlb_all(void)
+{
+ int i;
+ unsigned long num_tlb_sets;
+
+ /* Determine number of sets for IMMU. */
+ /* FIXME: Assumption is I & D nsets equal. */
+ num_tlb_sets = NUM_ITLB_SETS;
+
+ for (i = 0; i < num_tlb_sets; i++) {
+ mtspr_off(SPR_DTLBMR_BASE(0), i, 0);
+ mtspr_off(SPR_ITLBMR_BASE(0), i, 0);
+ }
+}
+
+#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
+#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)
+
+/*
+ * Invalidate a single page. This is what the xTLBEIR register is for.
+ *
+ * There's no point in checking the vma for PAGE_EXEC to determine whether it's
+ * the data or instruction TLB that should be flushed... that would take more
+ * than the few instructions that the following compiles down to!
+ *
+ * The case where we don't have the xTLBEIR register really only works for
+ * MMUs with a single way and is hard-coded that way.
+ */
+
+#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
+#define flush_dtlb_page_no_eir(addr) \
+ mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0);
+
+#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
+#define flush_itlb_page_no_eir(addr) \
+ mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ if (have_dtlbeir)
+ flush_dtlb_page_eir(addr);
+ else
+ flush_dtlb_page_no_eir(addr);
+
+ if (have_itlbeir)
+ flush_itlb_page_eir(addr);
+ else
+ flush_itlb_page_no_eir(addr);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+ bool dtlbeir;
+ bool itlbeir;
+
+ dtlbeir = have_dtlbeir;
+ itlbeir = have_itlbeir;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ if (dtlbeir)
+ flush_dtlb_page_eir(addr);
+ else
+ flush_dtlb_page_no_eir(addr);
+
+ if (itlbeir)
+ flush_itlb_page_eir(addr);
+ else
+ flush_itlb_page_no_eir(addr);
+ }
+}
+
+/*
+ * Invalidate the selected mm context only.
+ *
+ * FIXME: Due to some bug here, we're flushing everything for now.
+ * This should be changed to loop over the mm's VMAs and call flush_tlb_range(),
+ * as sketched below.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+
+ /* Was seeing bugs with the mm struct passed to us. Scrapped most of
+ * this function. */
+ /* Several architectures do this */
+ flush_tlb_all();
+}
+
+/* called in schedule() just before actually doing the switch_to */
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *next_tsk)
+{
+ /* remember the pgd for the fault handlers
+ * this is similar to the pgd register in some other CPU's.
+ * we need our own copy of it because current and active_mm
+ * might be invalid at points where we still need to dereference
+ * the pgd.
+ */
+ current_pgd = next->pgd;
+
+ /* We don't have context support implemented, so flush all
+ * entries belonging to previous map
+ */
+
+ if (prev != next)
+ flush_tlb_mm(prev);
+
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context = NO_CONTEXT;
+ return 0;
+}
+
+/* called by __exit_mm to destroy the used MMU context if any before
+ * destroying the mm itself. This is only called when the last user of the mm
+ * drops it.
+ */
+
+void destroy_context(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+
+}
+
+/* called once during VM initialization, from init.c */
+
+void __init tlb_init(void)
+{
+ /* Do nothing... */
+ /* invalidate the entire TLB */
+ /* flush_tlb_all(); */
+}
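The FIXME in flush_tlb_mm() above describes the intended per-mm flush without showing it. A minimal sketch of that approach (the helper name is made up; it assumes the mm->mmap / vma->vm_next VMA list of this kernel generation):

static void flush_tlb_mm_ranges(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}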
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index cedbbb8b18d9..5e34ccf39a49 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -540,18 +540,6 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
return (Elf_Addr)stub;
}
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- /* parisc should not need this ... */
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
#ifndef CONFIG_64BIT
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c0d842cfd012..e30442c539ce 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -179,8 +179,9 @@ extern const char *powerpc_base_platform;
#define LONG_ASM_CONST(x) 0
#endif
-
-#define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000)
+#define CPU_FTR_HVMODE LONG_ASM_CONST(0x0000000200000000)
+#define CPU_FTR_ARCH_201 LONG_ASM_CONST(0x0000000400000000)
+#define CPU_FTR_ARCH_206 LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
@@ -401,9 +402,10 @@ extern const char *powerpc_base_platform;
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
CPU_FTR_STCX_CHECKS_ADDRESS)
#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
- CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS)
+ CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \
+ CPU_FTR_HVMODE)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -417,13 +419,13 @@ extern const char *powerpc_base_platform;
CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR)
+ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index f5dfe3411f64..8057f4f6980f 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -61,19 +61,22 @@
#define EXC_HV H
#define EXC_STD
-#define EXCEPTION_PROLOG_1(area) \
+#define __EXCEPTION_PROLOG_1(area, extra, vec) \
GET_PACA(r13); \
std r9,area+EX_R9(r13); /* save r9 - r12 */ \
std r10,area+EX_R10(r13); \
- std r11,area+EX_R11(r13); \
- std r12,area+EX_R12(r13); \
BEGIN_FTR_SECTION_NESTED(66); \
mfspr r10,SPRN_CFAR; \
std r10,area+EX_CFAR(r13); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
- GET_SCRATCH0(r9); \
- std r9,area+EX_R13(r13); \
- mfcr r9
+ mfcr r9; \
+ extra(vec); \
+ std r11,area+EX_R11(r13); \
+ std r12,area+EX_R12(r13); \
+ GET_SCRATCH0(r10); \
+ std r10,area+EX_R13(r13)
+#define EXCEPTION_PROLOG_1(area, extra, vec) \
+ __EXCEPTION_PROLOG_1(area, extra, vec)
#define __EXCEPTION_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
@@ -85,13 +88,65 @@
mtspr SPRN_##h##SRR1,r10; \
h##rfid; \
b . /* prevent speculative execution */
-#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
+#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
__EXCEPTION_PROLOG_PSERIES_1(label, h)
-#define EXCEPTION_PROLOG_PSERIES(area, label, h) \
- EXCEPTION_PROLOG_1(area); \
+#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
+#define __KVMTEST(n) \
+ lbz r10,HSTATE_IN_GUEST(r13); \
+ cmpwi r10,0; \
+ bne do_kvm_##n
+
+#define __KVM_HANDLER(area, h, n) \
+do_kvm_##n: \
+ ld r10,area+EX_R10(r13); \
+ stw r9,HSTATE_SCRATCH1(r13); \
+ ld r9,area+EX_R9(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
+ li r12,n; \
+ b kvmppc_interrupt
+
+#define __KVM_HANDLER_SKIP(area, h, n) \
+do_kvm_##n: \
+ cmpwi r10,KVM_GUEST_MODE_SKIP; \
+ ld r10,area+EX_R10(r13); \
+ beq 89f; \
+ stw r9,HSTATE_SCRATCH1(r13); \
+ ld r9,area+EX_R9(r13); \
+ std r12,HSTATE_SCRATCH0(r13); \
+ li r12,n; \
+ b kvmppc_interrupt; \
+89: mtocrf 0x80,r9; \
+ ld r9,area+EX_R9(r13); \
+ b kvmppc_skip_##h##interrupt
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#define KVMTEST(n) __KVMTEST(n)
+#define KVM_HANDLER(area, h, n) __KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST(n)
+#define KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_SKIP(area, h, n)
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+#define KVMTEST_PR(n) __KVMTEST(n)
+#define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n)
+
+#else
+#define KVMTEST_PR(n)
+#define KVM_HANDLER_PR(area, h, n)
+#define KVM_HANDLER_PR_SKIP(area, h, n)
+#endif
+
+#define NOTEST(n)
+
/*
* The common exception prolog is used for all except a few exceptions
* such as a segment miss on a kernel address. We have to be prepared
@@ -164,57 +219,58 @@
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
- DO_KVM vec; \
SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_STD, KVMTEST_PR, vec)
#define STD_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_hv; \
label##_hv: \
HMT_MEDIUM; \
- DO_KVM vec; \
- SET_SCRATCH0(r13); /* save r13 */ \
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV)
+ SET_SCRATCH0(r13); /* save r13 */ \
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
+ EXC_HV, KVMTEST, vec)
-#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
- HMT_MEDIUM; \
- DO_KVM vec; \
- SET_SCRATCH0(r13); /* save r13 */ \
- GET_PACA(r13); \
- std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
- std r10,PACA_EXGEN+EX_R10(r13); \
+#define __SOFTEN_TEST(h) \
lbz r10,PACASOFTIRQEN(r13); \
- mfcr r9; \
cmpwi r10,0; \
- beq masked_##h##interrupt; \
- GET_SCRATCH0(r10); \
- std r10,PACA_EXGEN+EX_R13(r13); \
- std r11,PACA_EXGEN+EX_R11(r13); \
- std r12,PACA_EXGEN+EX_R12(r13); \
- ld r12,PACAKBASE(r13); /* get high part of &label */ \
- ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
- mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
- LOAD_HANDLER(r12,label##_common) \
- mtspr SPRN_##h##SRR0,r12; \
- mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
- mtspr SPRN_##h##SRR1,r10; \
- h##rfid; \
- b . /* prevent speculative execution */
-#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \
- __MASKABLE_EXCEPTION_PSERIES(vec, label, h)
+ beq masked_##h##interrupt
+#define _SOFTEN_TEST(h) __SOFTEN_TEST(h)
+
+#define SOFTEN_TEST_PR(vec) \
+ KVMTEST_PR(vec); \
+ _SOFTEN_TEST(EXC_STD)
+
+#define SOFTEN_TEST_HV(vec) \
+ KVMTEST(vec); \
+ _SOFTEN_TEST(EXC_HV)
+
+#define SOFTEN_TEST_HV_201(vec) \
+ KVMTEST(vec); \
+ _SOFTEN_TEST(EXC_STD)
+
+#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ HMT_MEDIUM; \
+ SET_SCRATCH0(r13); /* save r13 */ \
+ __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
+ EXCEPTION_PROLOG_PSERIES_1(label##_common, h);
+#define _MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra)
#define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD)
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, \
+ EXC_STD, SOFTEN_TEST_PR)
#define MASKABLE_EXCEPTION_HV(loc, vec, label) \
. = loc; \
.globl label##_hv; \
label##_hv: \
- _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV)
+ _MASKABLE_EXCEPTION_PSERIES(vec, label, \
+ EXC_HV, SOFTEN_TEST_HV)
#ifdef CONFIG_PPC_ISERIES
#define DISABLE_INTS \
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index fd8201dddd4b..1c324ff55ea8 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -29,6 +29,10 @@
#define H_LONG_BUSY_ORDER_100_SEC 9905 /* Long busy, hint that 100sec \
is a good time to retry */
#define H_LONG_BUSY_END_RANGE 9905 /* End of long busy range */
+
+/* Internal value used in book3s_hv kvm support; not returned to guests */
+#define H_TOO_HARD 9999
+
#define H_HARDWARE -1 /* Hardware error */
#define H_FUNCTION -2 /* Function not supported */
#define H_PRIVILEGE -3 /* Caller not privileged */
@@ -100,6 +104,7 @@
#define H_PAGE_SET_ACTIVE H_PAGE_STATE_CHANGE
#define H_AVPN (1UL<<(63-32)) /* An avpn is provided as a sanity test */
#define H_ANDCOND (1UL<<(63-33))
+#define H_LOCAL (1UL<<(63-35))
#define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */
#define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */
#define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index d2ca5ed3877b..a4f6c85431f8 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -22,6 +22,10 @@
#include <linux/types.h>
+/* Select powerpc specific features in <linux/kvm.h> */
+#define __KVM_HAVE_SPAPR_TCE
+#define __KVM_HAVE_PPC_SMT
+
struct kvm_regs {
__u64 pc;
__u64 cr;
@@ -272,4 +276,15 @@ struct kvm_guest_debug_arch {
#define KVM_INTERRUPT_UNSET -2U
#define KVM_INTERRUPT_SET_LEVEL -3U
+/* for KVM_CAP_SPAPR_TCE */
+struct kvm_create_spapr_tce {
+ __u64 liobn;
+ __u32 window_size;
+};
+
+/* for KVM_ALLOCATE_RMA */
+struct kvm_allocate_rma {
+ __u64 rma_size;
+};
+
#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 0951b17f4eb5..7b1f0e0fc653 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -64,8 +64,12 @@
#define BOOK3S_INTERRUPT_PROGRAM 0x700
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
+#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
+#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
+#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
+#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index d62e703f1214..98da010252a3 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -24,20 +24,6 @@
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
-struct kvmppc_slb {
- u64 esid;
- u64 vsid;
- u64 orige;
- u64 origv;
- bool valid : 1;
- bool Ks : 1;
- bool Kp : 1;
- bool nx : 1;
- bool large : 1; /* PTEs are 16MB */
- bool tb : 1; /* 1TB segment */
- bool class : 1;
-};
-
struct kvmppc_bat {
u64 raw;
u32 bepi;
@@ -67,11 +53,22 @@ struct kvmppc_sid_map {
#define VSID_POOL_SIZE (SID_CONTEXTS * 16)
#endif
+struct hpte_cache {
+ struct hlist_node list_pte;
+ struct hlist_node list_pte_long;
+ struct hlist_node list_vpte;
+ struct hlist_node list_vpte_long;
+ struct rcu_head rcu_head;
+ u64 host_va;
+ u64 pfn;
+ ulong slot;
+ struct kvmppc_pte pte;
+};
+
struct kvmppc_vcpu_book3s {
struct kvm_vcpu vcpu;
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
- struct kvmppc_slb slb[64];
struct {
u64 esid;
u64 vsid;
@@ -81,7 +78,6 @@ struct kvmppc_vcpu_book3s {
struct kvmppc_bat dbat[8];
u64 hid[6];
u64 gqr[8];
- int slb_nr;
u64 sdr1;
u64 hior;
u64 msr_mask;
@@ -93,7 +89,13 @@ struct kvmppc_vcpu_book3s {
u64 vsid_max;
#endif
int context_id[SID_CONTEXTS];
- ulong prog_flags; /* flags to inject when giving a 700 trap */
+
+ struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+ struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
+ struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+ struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+ int hpte_cache_count;
+ spinlock_t mmu_lock;
};
#define CONTEXT_HOST 0
@@ -110,8 +112,10 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
+extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -123,19 +127,22 @@ extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
+extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-extern ulong kvmppc_trampoline_lowmem;
-extern ulong kvmppc_trampoline_enter;
+extern void kvmppc_handler_lowmem_trampoline(void);
+extern void kvmppc_handler_trampoline_enter(void);
extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
@@ -147,15 +154,32 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
}
-static inline ulong dsisr(void)
+extern void kvm_return_point(void);
+
+/* Also add subarch specific defines */
+
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+#include <asm/kvm_book3s_32.h>
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64.h>
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_PR
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
- ulong r;
- asm ( "mfdsisr %0 " : "=r" (r) );
- return r;
+ return to_book3s(vcpu)->hior;
}
-extern void kvm_return_point(void);
-static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu);
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+ unsigned long pending_now, unsigned long old_pending)
+{
+ if (pending_now)
+ vcpu->arch.shared->int_pending = 1;
+ else if (old_pending)
+ vcpu->arch.shared->int_pending = 0;
+}
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
@@ -244,6 +268,120 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return to_svcpu(vcpu)->fault_dar;
}
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+ ulong crit_raw = vcpu->arch.shared->critical;
+ ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+ bool crit;
+
+ /* Truncate crit indicators in 32 bit mode */
+ if (!(vcpu->arch.shared->msr & MSR_SF)) {
+ crit_raw &= 0xffffffff;
+ crit_r1 &= 0xffffffff;
+ }
+
+ /* Critical section when crit == r1 */
+ crit = (crit_raw == crit_r1);
+ /* ... and we're in supervisor mode */
+ crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+
+ return crit;
+}
+#else /* CONFIG_KVM_BOOK3S_PR */
+
+static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
+ unsigned long pending_now, unsigned long old_pending)
+{
+}
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.xer;
+}
+
+static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.ctr = val;
+}
+
+static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.ctr;
+}
+
+static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.lr = val;
+}
+
+static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.lr;
+}
+
+static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+{
+ vcpu->arch.pc = val;
+}
+
+static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu);
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+ return vcpu->arch.last_inst;
+}
+
+static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault_dar;
+}
+
+static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif
+
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
@@ -251,12 +389,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
#define INS_DCBZ 0x7c0007ec
-/* Also add subarch specific defines */
-
-#ifdef CONFIG_PPC_BOOK3S_32
-#include <asm/kvm_book3s_32.h>
-#else
-#include <asm/kvm_book3s_64.h>
-#endif
-
#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 4cadd612d575..e43fe42b9875 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,9 +20,13 @@
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__
+#ifdef CONFIG_KVM_BOOK3S_PR
static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
{
return &get_paca()->shadow_vcpu;
}
+#endif
+
+#define SPAPR_TCE_SHIFT 12
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index d5a8a3861635..ef7b3688c3b6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -60,6 +60,36 @@ kvmppc_resume_\intno:
#else /*__ASSEMBLY__ */
+/*
+ * This struct goes in the PACA on 64-bit processors. It is used
+ * to store host state that needs to be saved when we enter a guest
+ * and restored when we exit, but isn't specific to any particular
+ * guest or vcpu. It also has some scratch fields used by the guest
+ * exit code.
+ */
+struct kvmppc_host_state {
+ ulong host_r1;
+ ulong host_r2;
+ ulong host_msr;
+ ulong vmhandler;
+ ulong scratch0;
+ ulong scratch1;
+ u8 in_guest;
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ struct kvm_vcpu *kvm_vcpu;
+ struct kvmppc_vcore *kvm_vcore;
+ unsigned long xics_phys;
+ u64 dabr;
+ u64 host_mmcr[3];
+ u32 host_pmc[8];
+ u64 host_purr;
+ u64 host_spurr;
+ u64 host_dscr;
+ u64 dec_expires;
+#endif
+};
+
struct kvmppc_book3s_shadow_vcpu {
ulong gpr[14];
u32 cr;
@@ -73,17 +103,12 @@ struct kvmppc_book3s_shadow_vcpu {
ulong shadow_srr1;
ulong fault_dar;
- ulong host_r1;
- ulong host_r2;
- ulong handler;
- ulong scratch0;
- ulong scratch1;
- ulong vmhandler;
- u8 in_guest;
-
#ifdef CONFIG_PPC_BOOK3S_32
u32 sr[16]; /* Guest SRs */
+
+ struct kvmppc_host_state hstate;
#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
u8 slb_max; /* highest used guest slb entry */
struct {
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 9c9ba3d59b1b..a90e09188777 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -93,4 +93,8 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dear;
}
+static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shared->msr;
+}
#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index 7a2a565f88c4..adbfca9dd100 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
@@ -29,17 +29,25 @@ struct tlbe{
u32 mas7;
};
+#define E500_TLB_VALID 1
+#define E500_TLB_DIRTY 2
+
+struct tlbe_priv {
+ pfn_t pfn;
+ unsigned int flags; /* E500_TLB_* */
+};
+
+struct vcpu_id_table;
+
struct kvmppc_vcpu_e500 {
/* Unmodified copy of the guest's TLB. */
- struct tlbe *guest_tlb[E500_TLB_NUM];
- /* TLB that's actually used when the guest is running. */
- struct tlbe *shadow_tlb[E500_TLB_NUM];
- /* Pages which are referenced in the shadow TLB. */
- struct page **shadow_pages[E500_TLB_NUM];
+ struct tlbe *gtlb_arch[E500_TLB_NUM];
- unsigned int guest_tlb_size[E500_TLB_NUM];
- unsigned int shadow_tlb_size[E500_TLB_NUM];
- unsigned int guest_tlb_nv[E500_TLB_NUM];
+ /* KVM internal information associated with each guest TLB entry */
+ struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
+
+ unsigned int gtlb_size[E500_TLB_NUM];
+ unsigned int gtlb_nv[E500_TLB_NUM];
u32 host_pid[E500_PID_NUM];
u32 pid[E500_PID_NUM];
@@ -53,6 +61,10 @@ struct kvmppc_vcpu_e500 {
u32 mas5;
u32 mas6;
u32 mas7;
+
+ /* vcpu id table */
+ struct vcpu_id_table *idt;
+
u32 l1csr0;
u32 l1csr1;
u32 hid0;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 186f150b9b89..cc22b282d755 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,15 +25,23 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
#include <linux/kvm_para.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
#include <asm/kvm_asm.h>
+#include <asm/processor.h>
-#define KVM_MAX_VCPUS 1
+#define KVM_MAX_VCPUS NR_CPUS
+#define KVM_MAX_VCORES NR_CPUS
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
+#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#endif
/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
@@ -57,6 +65,10 @@ struct kvm;
struct kvm_run;
struct kvm_vcpu;
+struct lppaca;
+struct slb_shadow;
+struct dtl;
+
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
@@ -133,9 +145,74 @@ struct kvmppc_exit_timing {
};
};
+struct kvmppc_pginfo {
+ unsigned long pfn;
+ atomic_t refcnt;
+};
+
+struct kvmppc_spapr_tce_table {
+ struct list_head list;
+ struct kvm *kvm;
+ u64 liobn;
+ u32 window_size;
+ struct page *pages[0];
+};
+
+struct kvmppc_rma_info {
+ void *base_virt;
+ unsigned long base_pfn;
+ unsigned long npages;
+ struct list_head list;
+ atomic_t use_count;
+};
+
struct kvm_arch {
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ unsigned long hpt_virt;
+ unsigned long ram_npages;
+ unsigned long ram_psize;
+ unsigned long ram_porder;
+ struct kvmppc_pginfo *ram_pginfo;
+ unsigned int lpid;
+ unsigned int host_lpid;
+ unsigned long host_lpcr;
+ unsigned long sdr1;
+ unsigned long host_sdr1;
+ int tlbie_lock;
+ int n_rma_pages;
+ unsigned long lpcr;
+ unsigned long rmor;
+ struct kvmppc_rma_info *rma;
+ struct list_head spapr_tce_tables;
+ unsigned short last_vcpu[NR_CPUS];
+ struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
};
+/*
+ * Struct for a virtual core.
+ * Note: entry_exit_count combines an entry count in the bottom 8 bits
+ * and an exit count in the next 8 bits. This is so that we can
+ * atomically increment the entry count iff the exit count is 0
+ * without taking the lock.
+ */
+struct kvmppc_vcore {
+ int n_runnable;
+ int n_blocked;
+ int num_threads;
+ int entry_exit_count;
+ int n_woken;
+ int nap_count;
+ u16 pcpu;
+ u8 vcore_running;
+ u8 in_guest;
+ struct list_head runnable_threads;
+ spinlock_t lock;
+};
+
+#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
+#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
+
struct kvmppc_pte {
ulong eaddr;
u64 vpage;
@@ -163,16 +240,18 @@ struct kvmppc_mmu {
bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
-struct hpte_cache {
- struct hlist_node list_pte;
- struct hlist_node list_pte_long;
- struct hlist_node list_vpte;
- struct hlist_node list_vpte_long;
- struct rcu_head rcu_head;
- u64 host_va;
- u64 pfn;
- ulong slot;
- struct kvmppc_pte pte;
+struct kvmppc_slb {
+ u64 esid;
+ u64 vsid;
+ u64 orige;
+ u64 origv;
+ bool valid : 1;
+ bool Ks : 1;
+ bool Kp : 1;
+ bool nx : 1;
+ bool large : 1; /* PTEs are 16MB */
+ bool tb : 1; /* 1TB segment */
+ bool class : 1;
};
struct kvm_vcpu_arch {
@@ -187,6 +266,9 @@ struct kvm_vcpu_arch {
ulong highmem_handler;
ulong rmcall;
ulong host_paca_phys;
+ struct kvmppc_slb slb[64];
+ int slb_max; /* 1 + index of last valid entry in slb[] */
+ int slb_nr; /* total number of entries in SLB */
struct kvmppc_mmu mmu;
#endif
@@ -195,13 +277,19 @@ struct kvm_vcpu_arch {
u64 fpr[32];
u64 fpscr;
+#ifdef CONFIG_SPE
+ ulong evr[32];
+ ulong spefscr;
+ ulong host_spefscr;
+ u64 acc;
+#endif
#ifdef CONFIG_ALTIVEC
vector128 vr[32];
vector128 vscr;
#endif
#ifdef CONFIG_VSX
- u64 vsr[32];
+ u64 vsr[64];
#endif
#ifdef CONFIG_PPC_BOOK3S
@@ -209,22 +297,27 @@ struct kvm_vcpu_arch {
u32 qpr[32];
#endif
-#ifdef CONFIG_BOOKE
ulong pc;
ulong ctr;
ulong lr;
ulong xer;
u32 cr;
-#endif
#ifdef CONFIG_PPC_BOOK3S
- ulong shadow_msr;
ulong hflags;
ulong guest_owned_ext;
+ ulong purr;
+ ulong spurr;
+ ulong dscr;
+ ulong amr;
+ ulong uamor;
+ u32 ctrl;
+ ulong dabr;
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
+ ulong shadow_msr;
ulong sprg4;
ulong sprg5;
ulong sprg6;
@@ -249,6 +342,7 @@ struct kvm_vcpu_arch {
u32 pvr;
u32 shadow_pid;
+ u32 shadow_pid1;
u32 pid;
u32 swap_pid;
@@ -258,6 +352,9 @@ struct kvm_vcpu_arch {
u32 dbcr1;
u32 dbsr;
+ u64 mmcr[3];
+ u32 pmc[8];
+
#ifdef CONFIG_KVM_EXIT_TIMING
struct mutex exit_timing_lock;
struct kvmppc_exit_timing timing_exit;
@@ -272,8 +369,12 @@ struct kvm_vcpu_arch {
struct dentry *debugfs_exit_timing;
#endif
+#ifdef CONFIG_PPC_BOOK3S
+ ulong fault_dar;
+ u32 fault_dsisr;
+#endif
+
#ifdef CONFIG_BOOKE
- u32 last_inst;
ulong fault_dear;
ulong fault_esr;
ulong queued_dear;
@@ -288,25 +389,47 @@ struct kvm_vcpu_arch {
u8 dcr_is_write;
u8 osi_needed;
u8 osi_enabled;
+ u8 hcall_needed;
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
struct hrtimer dec_timer;
struct tasklet_struct tasklet;
u64 dec_jiffies;
+ u64 dec_expires;
unsigned long pending_exceptions;
+ u16 last_cpu;
+ u8 ceded;
+ u8 prodded;
+ u32 last_inst;
+
+ struct lppaca *vpa;
+ struct slb_shadow *slb_shadow;
+ struct dtl *dtl;
+ struct dtl *dtl_end;
+
+ struct kvmppc_vcore *vcore;
+ int ret;
+ int trap;
+ int state;
+ int ptid;
+ wait_queue_head_t cpu_run;
+
struct kvm_vcpu_arch_shared *shared;
unsigned long magic_page_pa; /* phys addr to map the magic page to */
unsigned long magic_page_ea; /* effect. addr to map the magic page to */
-#ifdef CONFIG_PPC_BOOK3S
- struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
- struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
- struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
- struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
- int hpte_cache_count;
- spinlock_t mmu_lock;
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ struct kvm_vcpu_arch_shared shregs;
+
+ struct list_head run_list;
+ struct task_struct *run_task;
+ struct kvm_run *kvm_run;
#endif
};
+#define KVMPPC_VCPU_BUSY_IN_HOST 0
+#define KVMPPC_VCPU_BLOCKED 1
+#define KVMPPC_VCPU_RUNNABLE 2
+
#endif /* __POWERPC_KVM_HOST_H__ */
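The kvmppc_vcore comment above notes that entry_exit_count packs an entry count in the low 8 bits and an exit count in the next 8 bits, so the entry count can be bumped atomically iff no thread has started exiting, without taking vc->lock. A minimal cmpxchg-based sketch of that idea (the helper name and the use of cmpxchg() are assumptions, not code from this patch):

static inline bool try_enter_vcore(struct kvmppc_vcore *vc)
{
	int old, new;

	do {
		old = vc->entry_exit_count;
		if (old >> 8)		/* exit count non-zero: a thread is already exiting */
			return false;
		new = old + 1;		/* increment the entry count in the low 8 bits */
	} while (cmpxchg(&vc->entry_exit_count, old, new) != old);

	return true;
}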
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9345238edecf..d121f49d62b8 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -33,6 +33,9 @@
#else
#include <asm/kvm_booke.h>
#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/paca.h>
+#endif
enum emulation_result {
EMULATE_DONE, /* no further processing */
@@ -42,6 +45,7 @@ enum emulation_result {
EMULATE_AGAIN, /* something went wrong. go again */
};
+extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern char kvmppc_handlers_start[];
extern unsigned long kvmppc_handler_len;
@@ -109,6 +113,27 @@ extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
+
+extern long kvmppc_alloc_hpt(struct kvm *kvm);
+extern void kvmppc_free_hpt(struct kvm *kvm);
+extern long kvmppc_prepare_vrma(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern void kvmppc_map_vrma(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args);
+extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
+ struct kvm_allocate_rma *rma);
+extern struct kvmppc_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvmppc_rma_info *ri);
+extern int kvmppc_core_init_vm(struct kvm *kvm);
+extern void kvmppc_core_destroy_vm(struct kvm *kvm);
+extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
/*
* Cuts out inst bits with ordering according to spec.
@@ -151,4 +176,20 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{
+ paca[cpu].kvm_hstate.xics_phys = addr;
+}
+
+extern void kvm_rma_init(void);
+
+#else
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+{}
+
+static inline void kvm_rma_init(void)
+{}
+#endif
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index d865bd909c7d..b445e0af4c2b 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -90,13 +90,19 @@ extern char initial_stab[];
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
+#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
-#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
+#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
+#define HPTE_R_G ASM_CONST(0x0000000000000008)
+#define HPTE_R_M ASM_CONST(0x0000000000000010)
+#define HPTE_R_I ASM_CONST(0x0000000000000020)
+#define HPTE_R_W ASM_CONST(0x0000000000000040)
+#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
#define HPTE_R_C ASM_CONST(0x0000000000000080)
#define HPTE_R_R ASM_CONST(0x0000000000000100)
+#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 74126765106a..a6da12859959 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -147,9 +147,12 @@ struct paca_struct {
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
/* We use this to store guest state in */
struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
+ struct kvmppc_host_state kvm_hstate;
+#endif
};
extern struct paca_struct *paca;
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 1b422381fc16..368f72f79808 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -150,18 +150,22 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
-#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
-#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
-#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
-#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
-#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
-#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
-#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
-#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
-#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
-#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
-#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
-#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
+/*
+ * b = base register for addressing, o = base offset from register of 1st EVR
+ * n = first EVR, s = scratch
+ */
+#define SAVE_EVR(n,s,b,o) evmergehi s,s,n; stw s,o+4*(n)(b)
+#define SAVE_2EVRS(n,s,b,o) SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
+#define SAVE_4EVRS(n,s,b,o) SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
+#define SAVE_8EVRS(n,s,b,o) SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
+#define SAVE_16EVRS(n,s,b,o) SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
+#define SAVE_32EVRS(n,s,b,o) SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
+#define REST_EVR(n,s,b,o) lwz s,o+4*(n)(b); evmergelo n,s,n
+#define REST_2EVRS(n,s,b,o) REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
+#define REST_4EVRS(n,s,b,o) REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
+#define REST_8EVRS(n,s,b,o) REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
+#define REST_16EVRS(n,s,b,o) REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
+#define REST_32EVRS(n,s,b,o) REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW or 31,31,31 # very low priority
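The SAVE_*/REST_* EVR macros above now take an explicit base offset, so the same expansion can target THREAD_EVR0 or a KVM vcpu save area. As a rough C model of what one SAVE_32EVRS(n, s, b, o) expansion ends up doing (evmergehi pulls the upper 32 bits of each 64-bit GPR into the scratch register, stw stores it at o + 4*n off the base); the function and buffer names here are made up:

#include <stdint.h>
#include <string.h>

/* Illustrative model only: store the SPE high halves of 32 GPRs at base + off. */
void save_32_evrs(const uint64_t gpr64[32], uint8_t *base, unsigned int off)
{
	for (int n = 0; n < 32; n++) {
		uint32_t evr_hi = (uint32_t)(gpr64[n] >> 32);          /* evmergehi */
		memcpy(base + off + 4u * n, &evr_hi, sizeof(evr_hi));  /* stw */
	}
}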
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c5cae0dd176c..ddbe57ae8584 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -189,6 +189,9 @@
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DSCR 0x11
#define SPRN_CFAR 0x1c /* Come From Address Register */
+#define SPRN_AMR 0x1d /* Authority Mask Register */
+#define SPRN_UAMOR 0x9d /* User Authority Mask Override Register */
+#define SPRN_AMOR 0x15d /* Authority Mask Override Register */
#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
@@ -232,22 +235,28 @@
#define LPCR_VPM0 (1ul << (63-0))
#define LPCR_VPM1 (1ul << (63-1))
#define LPCR_ISL (1ul << (63-2))
+#define LPCR_VC_SH (63-2)
#define LPCR_DPFD_SH (63-11)
#define LPCR_VRMA_L (1ul << (63-12))
#define LPCR_VRMA_LP0 (1ul << (63-15))
#define LPCR_VRMA_LP1 (1ul << (63-16))
+#define LPCR_VRMASD_SH (63-16)
#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
+#define LPCR_RMLS_SH (63-37)
#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
#define LPCR_MER 0x00000800 /* Mediated External Exception */
+#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
+#define LPCR_LPES_SH 2
#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
+#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hardware m? error recovery */
#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
@@ -298,6 +307,7 @@
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
 #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
+#define HID0_HDICE_SH (63 - 23) /* 970 HDEC interrupt enable */
#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
@@ -353,6 +363,13 @@
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
+#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
+#define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */
+#define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */
+#define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */
+#define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */
+#define HID4_LPID1_SH 0 /* partition ID top 2 bits */
#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_HID6 0x3F9 /* BE HID 6 */
@@ -802,28 +819,28 @@
mfspr rX,SPRN_SPRG_PACA; \
FTR_SECTION_ELSE_NESTED(66); \
mfspr rX,SPRN_SPRG_HPACA; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define SET_PACA(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mtspr SPRN_SPRG_PACA,rX; \
FTR_SECTION_ELSE_NESTED(66); \
mtspr SPRN_SPRG_HPACA,rX; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define GET_SCRATCH0(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mfspr rX,SPRN_SPRG_SCRATCH0; \
FTR_SECTION_ELSE_NESTED(66); \
mfspr rX,SPRN_SPRG_HSCRATCH0; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#define SET_SCRATCH0(rX) \
BEGIN_FTR_SECTION_NESTED(66); \
mtspr SPRN_SPRG_SCRATCH0,rX; \
FTR_SECTION_ELSE_NESTED(66); \
mtspr SPRN_SPRG_HSCRATCH0,rX; \
- ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66)
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE, 66)
#else /* CONFIG_PPC_BOOK3S_64 */
#define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 0f0ad9fa01c1..9ec0b39f9ddc 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -318,6 +318,7 @@
#define ESR_ILK 0x00100000 /* Instr. Cache Locking */
#define ESR_PUO 0x00040000 /* Unimplemented Operation exception */
#define ESR_BO 0x00020000 /* Byte Ordering */
+#define ESR_SPV 0x00000080 /* Signal Processing operation */
/* Bit definitions related to the DBCR0. */
#if defined(CONFIG_40x)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 36e1c8a29be8..54b935f2f5de 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -128,6 +128,7 @@ int main(void)
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
+ DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -187,7 +188,9 @@ int main(void)
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+ DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
+ DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
@@ -198,11 +201,6 @@ int main(void)
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
- DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
- DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
-#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -397,67 +395,160 @@ int main(void)
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
+ DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
+ DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+#ifdef CONFIG_ALTIVEC
+ DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
+ DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+#endif
+#ifdef CONFIG_VSX
+ DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+#endif
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
+ DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
+ DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
+ DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
+ DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
+ DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
+ DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
+#endif
DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+ DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
+ DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
/* book3s */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+ DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
+ DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
+ DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
+ DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+ DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+ DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
+ DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+ DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
+ DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
+ DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
+ DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+#endif
#ifdef CONFIG_PPC_BOOK3S
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
- DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+ DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
+ DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+ DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
+ DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
+ DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+ DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
+ DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+ DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
+ DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
+ DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
+ DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
+ DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
+ DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+ DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
+ DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
+ DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
+ DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
+ DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+ DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
+ DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
+ DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
+ DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
+ DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
offsetof(struct kvmppc_vcpu_book3s, vcpu));
- DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
- DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
- DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
- DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
- DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
- DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
- DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
- DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
- DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
- DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
- DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
- DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
- DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
- DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
- DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
- DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
- DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
- DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
- DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
- DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
- DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
- DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
- vmhandler));
- DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
- scratch0));
- DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
- scratch1));
- DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
- in_guest));
- DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
- fault_dsisr));
- DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
- fault_dar));
- DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
- last_inst));
- DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
- shadow_srr1));
+ DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
+ DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
+ DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_PR
+# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+#else
+# define SVCPU_FIELD(x, f)
+#endif
+# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
+#else /* 32-bit */
+# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
+# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
+#endif
+
+ SVCPU_FIELD(SVCPU_CR, cr);
+ SVCPU_FIELD(SVCPU_XER, xer);
+ SVCPU_FIELD(SVCPU_CTR, ctr);
+ SVCPU_FIELD(SVCPU_LR, lr);
+ SVCPU_FIELD(SVCPU_PC, pc);
+ SVCPU_FIELD(SVCPU_R0, gpr[0]);
+ SVCPU_FIELD(SVCPU_R1, gpr[1]);
+ SVCPU_FIELD(SVCPU_R2, gpr[2]);
+ SVCPU_FIELD(SVCPU_R3, gpr[3]);
+ SVCPU_FIELD(SVCPU_R4, gpr[4]);
+ SVCPU_FIELD(SVCPU_R5, gpr[5]);
+ SVCPU_FIELD(SVCPU_R6, gpr[6]);
+ SVCPU_FIELD(SVCPU_R7, gpr[7]);
+ SVCPU_FIELD(SVCPU_R8, gpr[8]);
+ SVCPU_FIELD(SVCPU_R9, gpr[9]);
+ SVCPU_FIELD(SVCPU_R10, gpr[10]);
+ SVCPU_FIELD(SVCPU_R11, gpr[11]);
+ SVCPU_FIELD(SVCPU_R12, gpr[12]);
+ SVCPU_FIELD(SVCPU_R13, gpr[13]);
+ SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr);
+ SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar);
+ SVCPU_FIELD(SVCPU_LAST_INST, last_inst);
+ SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1);
#ifdef CONFIG_PPC_BOOK3S_32
- DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+ SVCPU_FIELD(SVCPU_SR, sr);
#endif
-#else
+#ifdef CONFIG_PPC64
+ SVCPU_FIELD(SVCPU_SLB, slb);
+ SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+#endif
+
+ HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
+ HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+ HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
+ HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
+ HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
+ HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+ HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+ HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
+ HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+ HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
+ HSTATE_FIELD(HSTATE_PMC, host_pmc);
+ HSTATE_FIELD(HSTATE_PURR, host_purr);
+ HSTATE_FIELD(HSTATE_SPURR, host_spurr);
+ HSTATE_FIELD(HSTATE_DSCR, host_dscr);
+ HSTATE_FIELD(HSTATE_DABR, dabr);
+ HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
+#else /* CONFIG_PPC_BOOK3S */
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
@@ -467,7 +558,7 @@ int main(void)
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif /* CONFIG_PPC_BOOK3S */
-#endif
+#endif /* CONFIG_KVM */
#ifdef CONFIG_KVM_GUEST
DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared,
@@ -497,6 +588,13 @@ int main(void)
DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
#endif
+#if defined(CONFIG_KVM) && defined(CONFIG_SPE)
+ DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+ DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+ DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+ DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));
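The SVCPU_FIELD()/HSTATE_FIELD() wrappers introduced above let the same symbolic constant resolve to a paca-relative offset on 64-bit (SVCPU fields only when PR KVM is built) and to a shadow-vcpu-relative offset on 32-bit. asm-offsets.c itself exists only to emit offsetof() values for assembly; the real file routes them through the compiler's assembly output rather than printf, but a stripped-down, hypothetical model of the mechanism looks like this (structure and symbol names invented):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical structures standing in for paca_struct and the host-state area. */
struct hstate { unsigned long scratch0; unsigned long in_guest; };
struct paca   { unsigned long lock_token; struct hstate kvm_hstate; };

/* asm-offsets-style emission: each DEFINE becomes a constant the assembler can use. */
#define DEFINE(sym, val)     printf("#define %s %zu\n", #sym, (size_t)(val))
#define HSTATE_FIELD(sym, f) DEFINE(sym, offsetof(struct paca, kvm_hstate.f))

int main(void)
{
	DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca, lock_token));
	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
	return 0;
}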
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S
index 4f9a93fcfe07..76797c5105d6 100644
--- a/arch/powerpc/kernel/cpu_setup_power7.S
+++ b/arch/powerpc/kernel/cpu_setup_power7.S
@@ -45,12 +45,12 @@ _GLOBAL(__restore_cpu_power7)
blr
__init_hvmode_206:
- /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */
+ /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
mfmsr r3
rldicl. r0,r3,4,63
bnelr
ld r5,CPU_SPEC_FEATURES(r4)
- LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206)
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
xor r5,r5,r6
std r5,CPU_SPEC_FEATURES(r4)
blr
@@ -61,19 +61,23 @@ __init_LPCR:
* LPES = 0b01 (HSRR0/1 used for 0x500)
* PECE = 0b111
* DPFD = 4
+ * HDICE = 0
+ * VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
+ * VRMASD = 0b10000 (L=1, LP=00)
*
* Other bits untouched for now
*/
mfspr r3,SPRN_LPCR
- ori r3,r3,(LPCR_LPES0|LPCR_LPES1)
- xori r3,r3, LPCR_LPES0
+ li r5,1
+ rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
- li r5,7
- sldi r5,r5,LPCR_DPFD_SH
- andc r3,r3,r5
li r5,4
- sldi r5,r5,LPCR_DPFD_SH
- or r3,r3,r5
+ rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
+ clrrdi r3,r3,1 /* clear HDICE */
+ li r5,4
+ rldimi r3,r5, LPCR_VC_SH, 0
+ li r5,0x10
+ rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
mtspr SPRN_LPCR,r3
isync
blr
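The rewritten __init_LPCR sequence above replaces the ori/andc dance with rldimi, which drops an n-bit field into the register at a given shift without disturbing the surrounding bits. A hedged C model of the same field insertion, using the LPCR_*_SH constants from the reg.h hunk (the starting value is invented):

#include <stdint.h>
#include <stdio.h>

#define LPCR_LPES_SH   2           /* 2-bit LPES field */
#define LPCR_DPFD_SH   (63 - 11)   /* 3-bit DPFD field */

/* Model of rldimi: insert the low 'width' bits of 'val' into 'reg' at 'shift'. */
static uint64_t insert_field(uint64_t reg, uint64_t val, unsigned shift, unsigned width)
{
	uint64_t mask = ((1ULL << width) - 1) << shift;
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint64_t lpcr = 0x123456789abcdef0ULL;          /* invented starting value */

	lpcr = insert_field(lpcr, 1, LPCR_LPES_SH, 2);  /* LPES = 0b01 */
	lpcr = insert_field(lpcr, 4, LPCR_DPFD_SH, 3);  /* DPFD = 4 */
	lpcr &= ~1ULL;                                  /* clear HDICE */
	printf("lpcr = 0x%016llx\n", (unsigned long long)lpcr);
	return 0;
}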
diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S
index 27f2507279d8..12fac8df01c5 100644
--- a/arch/powerpc/kernel/cpu_setup_ppc970.S
+++ b/arch/powerpc/kernel/cpu_setup_ppc970.S
@@ -76,7 +76,7 @@ _GLOBAL(__setup_cpu_ppc970)
/* Do nothing if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
- beqlr
+ beq no_hv_mode
mfspr r0,SPRN_HID0
li r11,5 /* clear DOZE and SLEEP */
@@ -90,7 +90,7 @@ _GLOBAL(__setup_cpu_ppc970MP)
/* Do nothing if not running in HV mode */
mfmsr r0
rldicl. r0,r0,4,63
- beqlr
+ beq no_hv_mode
mfspr r0,SPRN_HID0
li r11,0x15 /* clear DOZE and SLEEP */
@@ -109,6 +109,14 @@ load_hids:
sync
isync
+ /* Try to set LPES = 01 in HID4 */
+ mfspr r0,SPRN_HID4
+ clrldi r0,r0,1 /* clear LPES0 */
+ ori r0,r0,HID4_LPES1 /* set LPES1 */
+ sync
+ mtspr SPRN_HID4,r0
+ isync
+
/* Save away cpu state */
LOAD_REG_ADDR(r5,cpu_state_storage)
@@ -117,11 +125,21 @@ load_hids:
std r3,CS_HID0(r5)
mfspr r3,SPRN_HID1
std r3,CS_HID1(r5)
- mfspr r3,SPRN_HID4
- std r3,CS_HID4(r5)
+ mfspr r4,SPRN_HID4
+ std r4,CS_HID4(r5)
mfspr r3,SPRN_HID5
std r3,CS_HID5(r5)
+ /* See if we successfully set LPES1 to 1; if not we are in Apple mode */
+ andi. r4,r4,HID4_LPES1
+ bnelr
+
+no_hv_mode:
+ /* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */
+ ld r5,CPU_SPEC_FEATURES(r4)
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
+ andc r5,r5,r6
+ std r5,CPU_SPEC_FEATURES(r4)
blr
/* Called with no MMU context (typically MSR:IR/DR off) to
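The 970 setup above probes for usable hypervisor mode by trying to set LPES1 in HID4 and then checking whether the write stuck; if it did not (Apple-style firmware), control falls into no_hv_mode and CPU_FTR_HVMODE is cleared from the CPU feature word. A small hedged sketch of that write-then-verify probe, with hypothetical register accessors standing in for mfspr/mtspr:

#include <stdbool.h>
#include <stdint.h>

#define HID4_LPES1             (1u << (63 - 57))
#define CPU_FTR_HVMODE_EXAMPLE (1ull << 40)        /* hypothetical feature bit */

/* Hypothetical SPR accessors for illustration only. */
static uint64_t fake_hid4;
static uint64_t read_hid4(void)        { return fake_hid4; }
static void     write_hid4(uint64_t v) { fake_hid4 = v; }

static bool probe_hv_mode(uint64_t *cpu_features)
{
	write_hid4(read_hid4() | HID4_LPES1);
	if (!(read_hid4() & HID4_LPES1)) {             /* write did not stick */
		*cpu_features &= ~CPU_FTR_HVMODE_EXAMPLE;
		return false;
	}
	return true;
}

int main(void)
{
	uint64_t features = CPU_FTR_HVMODE_EXAMPLE;
	return probe_hv_mode(&features) ? 0 : 1;
}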
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a85f4874cba7..41b02c792aa3 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -40,7 +40,6 @@ __start_interrupts:
.globl system_reset_pSeries;
system_reset_pSeries:
HMT_MEDIUM;
- DO_KVM 0x100;
SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -50,82 +49,73 @@ BEGIN_FTR_SECTION
* state loss at this time.
*/
mfspr r13,SPRN_SRR1
- rlwinm r13,r13,47-31,30,31
- cmpwi cr0,r13,1
- bne 1f
- b .power7_wakeup_noloss
-1: cmpwi cr0,r13,2
- bne 1f
- b .power7_wakeup_loss
+ rlwinm. r13,r13,47-31,30,31
+ beq 9f
+
+ /* waking up from powersave (nap) state */
+ cmpwi cr1,r13,2
/* Total loss of HV state is fatal, we could try to use the
* PIR to locate a PACA, then use an emergency stack etc...
* but for now, let's just stay stuck here
*/
-1: cmpwi cr0,r13,3
- beq .
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
+ bgt cr1,.
+ GET_PACA(r13)
+
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ lbz r0,PACAPROCSTART(r13)
+ cmpwi r0,0x80
+ bne 1f
+ li r0,0
+ stb r0,PACAPROCSTART(r13)
+ b kvm_start_guest
+1:
+#endif
+
+ beq cr1,2f
+ b .power7_wakeup_noloss
+2: b .power7_wakeup_loss
+9:
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif /* CONFIG_PPC_P7_NAP */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
. = 0x200
-_machine_check_pSeries:
- HMT_MEDIUM
- DO_KVM 0x200
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
+machine_check_pSeries_1:
+ /* This is moved out of line as it can be patched by FW, but
+ * some code path might still want to branch into the original
+ * vector
+ */
+ b machine_check_pSeries
. = 0x300
.globl data_access_pSeries
data_access_pSeries:
HMT_MEDIUM
- DO_KVM 0x300
SET_SCRATCH0(r13)
+#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
- GET_PACA(r13)
- std r9,PACA_EXSLB+EX_R9(r13)
- std r10,PACA_EXSLB+EX_R10(r13)
- mfspr r10,SPRN_DAR
- mfspr r9,SPRN_DSISR
- srdi r10,r10,60
- rlwimi r10,r9,16,0x20
- mfcr r9
- cmpwi r10,0x2c
- beq do_stab_bolted_pSeries
- ld r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R11(r13)
- ld r11,PACA_EXSLB+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R12(r13)
- GET_SCRATCH0(r12)
- std r10,PACA_EXGEN+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
-FTR_SECTION_ELSE
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
+ b data_access_check_stab
+data_access_not_stab:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
+#endif
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
+ KVMTEST_PR, 0x300)
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
- DO_KVM 0x380
SET_SCRATCH0(r13)
- GET_PACA(r13)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
+ mfspr r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
@@ -147,24 +137,16 @@ data_access_slb_pSeries:
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
HMT_MEDIUM
- DO_KVM 0x480
SET_SCRATCH0(r13)
- GET_PACA(r13)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
+ mfspr r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
@@ -184,26 +166,46 @@ instruction_access_slb_pSeries:
hardware_interrupt_pSeries:
hardware_interrupt_hv:
BEGIN_FTR_SECTION
- _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
+ _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
+ EXC_HV, SOFTEN_TEST_HV)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
FTR_SECTION_ELSE
- _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
- ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+ _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
+ EXC_STD, SOFTEN_TEST_HV_201)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+
STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+
STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
- MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
+ MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+
STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
. = 0xc00
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
- DO_KVM 0xc00
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r10,PACA_EXGEN+EX_R10(r13)
+ mfcr r9
+ KVMTEST(0xc00)
+ GET_SCRATCH0(r13)
+#endif
BEGIN_FTR_SECTION
cmpdi r0,0x1ebe
beq- 1f
@@ -220,6 +222,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
rfid
b . /* prevent speculative execution */
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
+
/* Fast LE/BE switch system call */
1: mfspr r12,SPRN_SRR1
xori r12,r12,MSR_LE
@@ -228,6 +232,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
b .
STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
* out of line to handle them
@@ -262,30 +267,93 @@ vsx_unavailable_pSeries_1:
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */
+
STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */
+
STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
#endif /* CONFIG_CBE_RAS */
. = 0x3000
/*** Out of line interrupts support ***/
+ /* moved from 0x200 */
+machine_check_pSeries:
+ .globl machine_check_fwnmi
+machine_check_fwnmi:
+ HMT_MEDIUM
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common,
+ EXC_STD, KVMTEST, 0x200)
+ KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
+
+#ifndef CONFIG_POWER4_ONLY
+ /* moved from 0x300 */
+data_access_check_stab:
+ GET_PACA(r13)
+ std r9,PACA_EXSLB+EX_R9(r13)
+ std r10,PACA_EXSLB+EX_R10(r13)
+ mfspr r10,SPRN_DAR
+ mfspr r9,SPRN_DSISR
+ srdi r10,r10,60
+ rlwimi r10,r9,16,0x20
+#ifdef CONFIG_KVM_BOOK3S_PR
+ lbz r9,HSTATE_IN_GUEST(r13)
+ rlwimi r10,r9,8,0x300
+#endif
+ mfcr r9
+ cmpwi r10,0x2c
+ beq do_stab_bolted_pSeries
+ mtcrf 0x80,r9
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ b data_access_not_stab
+do_stab_bolted_pSeries:
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ GET_SCRATCH0(r10)
+ std r10,PACA_EXSLB+EX_R13(r13)
+ EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
+#endif /* CONFIG_POWER4_ONLY */
+
+ KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
+ KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
+ KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
+
+ .align 7
/* moved from 0xe00 */
- STD_EXCEPTION_HV(., 0xe00, h_data_storage)
- STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
- STD_EXCEPTION_HV(., 0xe40, emulation_assist)
- STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
+ STD_EXCEPTION_HV(., 0xe02, h_data_storage)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
+ STD_EXCEPTION_HV(., 0xe22, h_instr_storage)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
+ STD_EXCEPTION_HV(., 0xe42, emulation_assist)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
+ STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
+ KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
/*
* An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -317,14 +385,6 @@ masked_Hinterrupt:
hrfid
b .
- .align 7
-do_stab_bolted_pSeries:
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- GET_SCRATCH0(r10)
- std r10,PACA_EXSLB+EX_R13(r13)
- EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
-
#ifdef CONFIG_PPC_PSERIES
/*
* Vectors for the FWNMI option. Share common code.
@@ -334,14 +394,8 @@ do_stab_bolted_pSeries:
system_reset_fwnmi:
HMT_MEDIUM
SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
-
- .globl machine_check_fwnmi
- .align 7
-machine_check_fwnmi:
- HMT_MEDIUM
- SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ NOTEST, 0x100)
#endif /* CONFIG_PPC_PSERIES */
@@ -376,7 +430,11 @@ slb_miss_user_pseries:
/* KVM's trampoline code needs to be close to the interrupt handlers */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
#include "../kvm/book3s_rmhandlers.S"
+#else
+#include "../kvm/book3s_hv_rmhandlers.S"
+#endif
#endif
.align 7
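The reworked 0x100 vector above decodes the wake-up reason from SRR1 bits 46:47 (the rlwinm. r13,r13,47-31,30,31): 0 means this is not a nap wake-up and the normal reset path is taken, 1 means no state was lost, 2 means state must be restored from the stack, and 3 is a total loss of HV state that the comment treats as fatal; with HV KVM built in, a secondary whose PACAPROCSTART is 0x80 branches to kvm_start_guest instead. A hedged C rendering of just the SRR1 decode (the enum names are invented):

#include <stdint.h>
#include <stdio.h>

/* Invented names; the real code branches in assembly rather than switching. */
enum wake_reason { WAKE_RESET = 0, WAKE_NAP_NOLOSS = 1, WAKE_NAP_LOSS = 2, WAKE_NAP_FATAL = 3 };

static enum wake_reason srr1_wake_reason(uint64_t srr1)
{
	return (enum wake_reason)((srr1 >> 16) & 3);   /* rlwinm r13,r13,47-31,30,31 */
}

int main(void)
{
	uint64_t srr1 = 0x10000;                       /* example: reason 1 */

	switch (srr1_wake_reason(srr1)) {
	case WAKE_RESET:      printf("not a nap wake-up; normal reset path\n"); break;
	case WAKE_NAP_NOLOSS: printf("wake from nap, no state lost\n"); break;
	case WAKE_NAP_LOSS:   printf("wake from nap, restore saved state\n"); break;
	case WAKE_NAP_FATAL:  printf("total loss of HV state\n"); break;
	}
	return 0;
}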
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 5ecf54cfa7d4..fe37dd0dfd17 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -656,7 +656,7 @@ load_up_spe:
cmpi 0,r4,0
beq 1f
addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
- SAVE_32EVRS(0,r10,r4)
+ SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
evxor evr10, evr10, evr10 /* clear out evr10 */
evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
li r5,THREAD_ACC
@@ -676,7 +676,7 @@ load_up_spe:
stw r4,THREAD_USED_SPE(r5)
evlddx evr4,r10,r5
evmra evr4,evr4
- REST_32EVRS(0,r10,r5)
+ REST_32EVRS(0,r10,r5,THREAD_EVR0)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
stw r4,last_task_used_spe@l(r3)
@@ -787,13 +787,11 @@ _GLOBAL(giveup_spe)
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpi 0,r5,0
- SAVE_32EVRS(0, r4, r3)
+ SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
evxor evr6, evr6, evr6 /* clear out evr6 */
evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
li r4,THREAD_ACC
evstddx evr6, r4, r3 /* save off accumulator */
- mfspr r6,SPRN_SPEFSCR
- stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_SPE@h
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index f8f0bc7f1d4f..3a70845a51c7 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -73,7 +73,6 @@ _GLOBAL(power7_idle)
b .
_GLOBAL(power7_wakeup_loss)
- GET_PACA(r13)
ld r1,PACAR1(r13)
REST_NVGPRS(r1)
REST_GPR(2, r1)
@@ -87,7 +86,6 @@ _GLOBAL(power7_wakeup_loss)
rfid
_GLOBAL(power7_wakeup_noloss)
- GET_PACA(r13)
ld r1,PACAR1(r13)
ld r4,_MSR(r1)
ld r5,_NIP(r1)
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 49cee9df225b..a1cd701b5753 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -31,20 +31,6 @@
LIST_HEAD(module_bug_list);
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-
- return vmalloc_exec(size);
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *name)
@@ -93,7 +79,3 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index f832773fc28e..0b6d79617d7b 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -174,17 +174,6 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *module)
-{
- printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 8fbb12508bf3..9f44a775a106 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -243,16 +243,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf64_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
- return -ENOEXEC;
-}
-
/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
gives the value maximum span in an instruction which uses a signed
offset) */
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index efeb88184182..0a5a899846bb 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -167,7 +167,7 @@ void setup_paca(struct paca_struct *new_paca)
* if we do a GET_PACA() before the feature fixups have been
* applied
*/
- if (cpu_has_feature(CPU_FTR_HVMODE_206))
+ if (cpu_has_feature(CPU_FTR_HVMODE))
mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 91e52df3d81d..ec2d0edeb134 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_fp_to_thread);
void enable_kernel_fp(void)
{
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
+EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
@@ -213,6 +216,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
#ifdef CONFIG_SMP
BUG_ON(tsk != current);
#endif
+ tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
giveup_spe(tsk);
}
preempt_enable();
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b65..22051ef04bd9 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
int threads_per_core, threads_shift;
cpumask_t threads_core_mask;
+EXPORT_SYMBOL_GPL(threads_per_core);
+EXPORT_SYMBOL_GPL(threads_shift);
+EXPORT_SYMBOL_GPL(threads_core_mask);
static void __init cpu_init_thread_core_maps(int tpc)
{
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a88bf2713d41..532054f24ecb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -63,6 +63,7 @@
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
+#include <asm/kvm_ppc.h>
#include "setup.h"
@@ -580,6 +581,8 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff */
mmu_context_init();
+ kvm_rma_init();
+
ppc64_boot_msg(0x15, "Setup Done");
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98d..09a85a9045d6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,6 +243,7 @@ void smp_send_reschedule(int cpu)
if (likely(smp_ops))
smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1a0141426cda..f19d9777d3c1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1387,10 +1387,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
int code = 0;
int err;
- preempt_disable();
- if (regs->msr & MSR_SPE)
- giveup_spe(current);
- preempt_enable();
+ flush_spe_to_thread(current);
spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5f3cff83e089..33aa715dab28 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -387,8 +387,10 @@ static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
}
}
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
+ int usermode = vcpu->arch.shared->msr & MSR_PR;
+
vcpu->arch.shadow_pid = !usermode;
}
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index b7baff78f90c..78133deb4b64 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,7 +20,6 @@ config KVM
bool
select PREEMPT_NOTIFIERS
select ANON_INODES
- select KVM_MMIO
config KVM_BOOK3S_HANDLER
bool
@@ -28,16 +27,22 @@ config KVM_BOOK3S_HANDLER
config KVM_BOOK3S_32_HANDLER
bool
select KVM_BOOK3S_HANDLER
+ select KVM_MMIO
config KVM_BOOK3S_64_HANDLER
bool
select KVM_BOOK3S_HANDLER
+config KVM_BOOK3S_PR
+ bool
+ select KVM_MMIO
+
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT
select KVM
select KVM_BOOK3S_32_HANDLER
+ select KVM_BOOK3S_PR
---help---
Support running unmodified book3s_32 guest kernels
in virtual machines on book3s_32 host processors.
@@ -50,8 +55,8 @@ config KVM_BOOK3S_32
config KVM_BOOK3S_64
tristate "KVM support for PowerPC book3s_64 processors"
depends on EXPERIMENTAL && PPC_BOOK3S_64
- select KVM
select KVM_BOOK3S_64_HANDLER
+ select KVM
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
@@ -61,10 +66,34 @@ config KVM_BOOK3S_64
If unsure, say N.
+config KVM_BOOK3S_64_HV
+ bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+ depends on KVM_BOOK3S_64
+ ---help---
+ Support running unmodified book3s_64 guest kernels in
+ virtual machines on POWER7 and PPC970 processors that have
+ hypervisor mode available to the host.
+
+ If you say Y here, KVM will use the hardware virtualization
+ facilities of POWER7 (and later) processors, meaning that
+ guest operating systems will run at full hardware speed
+ using supervisor and user modes. However, this also means
+ that KVM is not usable under PowerVM (pHyp), is only usable
+ on POWER7 (or later) processors and PPC970-family processors,
+ and cannot emulate a different processor from the host processor.
+
+ If unsure, say N.
+
+config KVM_BOOK3S_64_PR
+ def_bool y
+ depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
+ select KVM_BOOK3S_PR
+
config KVM_440
bool "KVM support for PowerPC 440 processors"
depends on EXPERIMENTAL && 44x
select KVM
+ select KVM_MMIO
---help---
Support running unmodified 440 guest kernels in virtual machines on
440 host processors.
@@ -89,6 +118,7 @@ config KVM_E500
bool "KVM support for PowerPC E500 processors"
depends on EXPERIMENTAL && E500
select KVM
+ select KVM_MMIO
---help---
Support running unmodified E500 guest kernels in virtual machines on
E500 host processors.
@@ -99,6 +129,5 @@ config KVM_E500
If unsure, say N.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
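KVM_BOOK3S_64_HV and the new KVM_BOOK3S_64_PR default above are mutually exclusive, and the rest of the series keys its C code off CONFIG_KVM_BOOK3S_PR versus CONFIG_KVM_BOOK3S_64_HV accordingly. A minimal hedged sketch of how such a pair of symbols typically selects one of two backends at compile time; the CONFIG_EXAMPLE_* symbols and function names are placeholders, not from the patch (build with -DCONFIG_EXAMPLE_PR or -DCONFIG_EXAMPLE_HV):

#include <stdio.h>

#if defined(CONFIG_EXAMPLE_HV)
static void vcpu_backend(void) { printf("hypervisor-mode backend\n"); }
#elif defined(CONFIG_EXAMPLE_PR)
static void vcpu_backend(void) { printf("problem-state (PR) backend\n"); }
#else
#error "exactly one backend must be selected"
#endif

int main(void)
{
	vcpu_backend();
	return 0;
}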
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 4d6863823f69..08428e2c188d 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -38,24 +38,42 @@ kvm-e500-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
-kvm-book3s_64-objs := \
- $(common-objs-y) \
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+ ../../../virt/kvm/coalesced_mmio.o \
fpu.o \
book3s_paired_singles.o \
- book3s.o \
+ book3s_pr.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
book3s_64_mmu_host.o \
book3s_64_mmu.o \
book3s_32_mmu.o
-kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
+
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+ book3s_hv.o \
+ book3s_hv_interrupts.o \
+ book3s_64_mmu_hv.o
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+ book3s_hv_rm_mmu.o \
+ book3s_64_vio_hv.o \
+ book3s_hv_builtin.o
+
+kvm-book3s_64-module-objs := \
+ ../../../virt/kvm/kvm_main.o \
+ powerpc.o \
+ emulate.o \
+ book3s.o \
+ $(kvm-book3s_64-objs-y)
+
+kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
kvm-book3s_32-objs := \
$(common-objs-y) \
fpu.o \
book3s_paired_singles.o \
book3s.o \
+ book3s_pr.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
@@ -70,3 +88,4 @@ obj-$(CONFIG_KVM_E500) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
+obj-y += $(kvm-book3s_64-builtin-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 0f95b5cce033..f68a34d16035 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -17,7 +17,6 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include "trace.h"
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -28,25 +27,17 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
+#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
+#include "trace.h"
+
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* #define EXIT_DEBUG */
-/* #define DEBUG_EXT */
-
-static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
- ulong msr);
-
-/* Some compatibility defines */
-#ifdef CONFIG_PPC_BOOK3S_32
-#define MSR_USER32 MSR_USER
-#define MSR_USER64 MSR_USER
-#define HW_PAGE_SIZE PAGE_SIZE
-#endif
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exits", VCPU_STAT(sum_exits) },
@@ -77,100 +68,11 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
- memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
- memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
- sizeof(get_paca()->shadow_vcpu));
- to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
-#endif
-
-#ifdef CONFIG_PPC_BOOK3S_32
- current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
-#endif
-}
-
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
- memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
- memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
- sizeof(get_paca()->shadow_vcpu));
- to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
-#endif
-
- kvmppc_giveup_ext(vcpu, MSR_FP);
- kvmppc_giveup_ext(vcpu, MSR_VEC);
- kvmppc_giveup_ext(vcpu, MSR_VSX);
-}
-
-static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
-{
- ulong smsr = vcpu->arch.shared->msr;
-
- /* Guest MSR values */
- smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
- /* Process MSR values */
- smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
- /* External providers the guest reserved */
- smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
- /* 64-bit Process MSR values */
-#ifdef CONFIG_PPC_BOOK3S_64
- smsr |= MSR_ISF | MSR_HV;
-#endif
- vcpu->arch.shadow_msr = smsr;
-}
-
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
-{
- ulong old_msr = vcpu->arch.shared->msr;
-
-#ifdef EXIT_DEBUG
- printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
-#endif
-
- msr &= to_book3s(vcpu)->msr_mask;
- vcpu->arch.shared->msr = msr;
- kvmppc_recalc_shadow_msr(vcpu);
-
- if (msr & MSR_POW) {
- if (!vcpu->arch.pending_exceptions) {
- kvm_vcpu_block(vcpu);
- vcpu->stat.halt_wakeup++;
-
- /* Unset POW bit after we woke up */
- msr &= ~MSR_POW;
- vcpu->arch.shared->msr = msr;
- }
- }
-
- if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
- (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
- kvmppc_mmu_flush_segments(vcpu);
- kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
-
- /* Preload magic page segment when in kernel mode */
- if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
- struct kvm_vcpu_arch *a = &vcpu->arch;
-
- if (msr & MSR_DR)
- kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
- else
- kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
- }
- }
-
- /* Preload FPU if it's enabled */
- if (vcpu->arch.shared->msr & MSR_FP)
- kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
-}
-
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
- kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
+ kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
vcpu->arch.mmu.reset_msr(vcpu);
}
@@ -204,11 +106,13 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec)
{
+ unsigned long old_pending = vcpu->arch.pending_exceptions;
+
clear_bit(kvmppc_book3s_vec2irqprio(vec),
&vcpu->arch.pending_exceptions);
- if (!vcpu->arch.pending_exceptions)
- vcpu->arch.shared->int_pending = 0;
+ kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
+ old_pending);
}
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
@@ -225,8 +129,8 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
- to_book3s(vcpu)->prog_flags = flags;
- kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
+ /* might as well deliver this straight away */
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -266,21 +170,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
int deliver = 1;
int vec = 0;
- ulong flags = 0ULL;
- ulong crit_raw = vcpu->arch.shared->critical;
- ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
- bool crit;
-
- /* Truncate crit indicators in 32 bit mode */
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
- crit_raw &= 0xffffffff;
- crit_r1 &= 0xffffffff;
- }
-
- /* Critical section when crit == r1 */
- crit = (crit_raw == crit_r1);
- /* ... and we're in supervisor mode */
- crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+ bool crit = kvmppc_critical_section(vcpu);
switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
@@ -315,7 +205,6 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
break;
case BOOK3S_IRQPRIO_PROGRAM:
vec = BOOK3S_INTERRUPT_PROGRAM;
- flags = to_book3s(vcpu)->prog_flags;
break;
case BOOK3S_IRQPRIO_VSX:
vec = BOOK3S_INTERRUPT_VSX;
@@ -346,7 +235,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
#endif
if (deliver)
- kvmppc_inject_interrupt(vcpu, vec, flags);
+ kvmppc_inject_interrupt(vcpu, vec, 0);
return deliver;
}
@@ -392,64 +281,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
}
/* Tell the guest about our interrupt status */
- if (*pending)
- vcpu->arch.shared->int_pending = 1;
- else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
-}
-
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
-{
- u32 host_pvr;
-
- vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
- vcpu->arch.pvr = pvr;
-#ifdef CONFIG_PPC_BOOK3S_64
- if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
- kvmppc_mmu_book3s_64_init(vcpu);
- to_book3s(vcpu)->hior = 0xfff00000;
- to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
- } else
-#endif
- {
- kvmppc_mmu_book3s_32_init(vcpu);
- to_book3s(vcpu)->hior = 0;
- to_book3s(vcpu)->msr_mask = 0xffffffffULL;
- }
-
- /* If we are in hypervisor level on 970, we can tell the CPU to
- * treat DCBZ as 32 bytes store */
- vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
- if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
- !strcmp(cur_cpu_spec->platform, "ppc970"))
- vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
-
- /* Cell performs badly if MSR_FEx are set. So let's hope nobody
- really needs them in a VM on Cell and force disable them. */
- if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
- to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* 32 bit Book3S always has 32 byte dcbz */
- vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
-#endif
-
- /* On some CPUs we can execute paired single operations natively */
- asm ( "mfpvr %0" : "=r"(host_pvr));
- switch (host_pvr) {
- case 0x00080200: /* lonestar 2.0 */
- case 0x00088202: /* lonestar 2.2 */
- case 0x70000100: /* gekko 1.0 */
- case 0x00080100: /* gekko 2.0 */
- case 0x00083203: /* gekko 2.3a */
- case 0x00083213: /* gekko 2.3b */
- case 0x00083204: /* gekko 2.4 */
- case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
- case 0x00087200: /* broadway */
- vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
- /* Enable HID2.PSE - in case we need it later */
- mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
- }
+ kvmppc_update_int_pending(vcpu, *pending, old_pending);
}
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -471,44 +303,6 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
return gfn_to_pfn(vcpu->kvm, gfn);
}
-/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
- * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
- * emulate 32 bytes dcbz length.
- *
- * The Book3s_64 inventors also realized this case and implemented a special bit
- * in the HID5 register, which is a hypervisor ressource. Thus we can't use it.
- *
- * My approach here is to patch the dcbz instruction on executing pages.
- */
-static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
-{
- struct page *hpage;
- u64 hpage_offset;
- u32 *page;
- int i;
-
- hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
- if (is_error_page(hpage)) {
- kvm_release_page_clean(hpage);
- return;
- }
-
- hpage_offset = pte->raddr & ~PAGE_MASK;
- hpage_offset &= ~0xFFFULL;
- hpage_offset /= 4;
-
- get_page(hpage);
- page = kmap_atomic(hpage, KM_USER0);
-
- /* patch dcbz into reserved instruction, so we trap */
- for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
- if ((page[i] & 0xff0007ff) == INS_DCBZ)
- page[i] &= 0xfffffff7;
-
- kunmap_atomic(page, KM_USER0);
- put_page(hpage);
-}
-
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
struct kvmppc_pte *pte)
{
@@ -606,519 +400,6 @@ mmio:
return EMULATE_DO_MMIO;
}
-static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
- ulong mp_pa = vcpu->arch.magic_page_pa;
-
- if (unlikely(mp_pa) &&
- unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
- return 1;
- }
-
- return kvm_is_visible_gfn(vcpu->kvm, gfn);
-}
-
-int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
- ulong eaddr, int vec)
-{
- bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
- int r = RESUME_GUEST;
- int relocated;
- int page_found = 0;
- struct kvmppc_pte pte;
- bool is_mmio = false;
- bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
- bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
- u64 vsid;
-
- relocated = data ? dr : ir;
-
- /* Resolve real address if translation turned on */
- if (relocated) {
- page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
- } else {
- pte.may_execute = true;
- pte.may_read = true;
- pte.may_write = true;
- pte.raddr = eaddr & KVM_PAM;
- pte.eaddr = eaddr;
- pte.vpage = eaddr >> 12;
- }
-
- switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
- case 0:
- pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
- break;
- case MSR_DR:
- case MSR_IR:
- vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
-
- if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
- pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
- else
- pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
- pte.vpage |= vsid;
-
- if (vsid == -1)
- page_found = -EINVAL;
- break;
- }
-
- if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
- /*
- * If we do the dcbz hack, we have to NX on every execution,
- * so we can patch the executing code. This renders our guest
- * NX-less.
- */
- pte.may_execute = !data;
- }
-
- if (page_found == -ENOENT) {
- /* Page not found in guest PTE entries */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
- vcpu->arch.shared->msr |=
- (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
- kvmppc_book3s_queue_irqprio(vcpu, vec);
- } else if (page_found == -EPERM) {
- /* Storage protection */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr =
- to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
- vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
- vcpu->arch.shared->msr |=
- (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
- kvmppc_book3s_queue_irqprio(vcpu, vec);
- } else if (page_found == -EINVAL) {
- /* Page not found in guest SLB */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
- } else if (!is_mmio &&
- kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
- /* The guest's PTE is not mapped yet. Map on the host */
- kvmppc_mmu_map_page(vcpu, &pte);
- if (data)
- vcpu->stat.sp_storage++;
- else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
- kvmppc_patch_dcbz(vcpu, &pte);
- } else {
- /* MMIO */
- vcpu->stat.mmio_exits++;
- vcpu->arch.paddr_accessed = pte.raddr;
- r = kvmppc_emulate_mmio(run, vcpu);
- if ( r == RESUME_HOST_NV )
- r = RESUME_HOST;
- }
-
- return r;
-}
-
-static inline int get_fpr_index(int i)
-{
-#ifdef CONFIG_VSX
- i *= 2;
-#endif
- return i;
-}
-
-/* Give up external provider (FPU, Altivec, VSX) */
-void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
-{
- struct thread_struct *t = &current->thread;
- u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
- u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
- u64 *thread_fpr = (u64*)t->fpr;
- int i;
-
- if (!(vcpu->arch.guest_owned_ext & msr))
- return;
-
-#ifdef DEBUG_EXT
- printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
-#endif
-
- switch (msr) {
- case MSR_FP:
- giveup_fpu(current);
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
- vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
-
- vcpu->arch.fpscr = t->fpscr.val;
- break;
- case MSR_VEC:
-#ifdef CONFIG_ALTIVEC
- giveup_altivec(current);
- memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
- vcpu->arch.vscr = t->vscr;
-#endif
- break;
- case MSR_VSX:
-#ifdef CONFIG_VSX
- __giveup_vsx(current);
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
- vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
- break;
- default:
- BUG();
- }
-
- vcpu->arch.guest_owned_ext &= ~msr;
- current->thread.regs->msr &= ~msr;
- kvmppc_recalc_shadow_msr(vcpu);
-}
-
-static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
-{
- ulong srr0 = kvmppc_get_pc(vcpu);
- u32 last_inst = kvmppc_get_last_inst(vcpu);
- int ret;
-
- ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
- if (ret == -ENOENT) {
- ulong msr = vcpu->arch.shared->msr;
-
- msr = kvmppc_set_field(msr, 33, 33, 1);
- msr = kvmppc_set_field(msr, 34, 36, 0);
- vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
- kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
- return EMULATE_AGAIN;
- }
-
- return EMULATE_DONE;
-}
-
-static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
-{
-
- /* Need to do paired single emulation? */
- if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
- return EMULATE_DONE;
-
- /* Read out the instruction */
- if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
- /* Need to emulate */
- return EMULATE_FAIL;
-
- return EMULATE_AGAIN;
-}
-
-/* Handle external providers (FPU, Altivec, VSX) */
-static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
- ulong msr)
-{
- struct thread_struct *t = &current->thread;
- u64 *vcpu_fpr = vcpu->arch.fpr;
-#ifdef CONFIG_VSX
- u64 *vcpu_vsx = vcpu->arch.vsr;
-#endif
- u64 *thread_fpr = (u64*)t->fpr;
- int i;
-
- /* When we have paired singles, we emulate in software */
- if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
- return RESUME_GUEST;
-
- if (!(vcpu->arch.shared->msr & msr)) {
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- return RESUME_GUEST;
- }
-
- /* We already own the ext */
- if (vcpu->arch.guest_owned_ext & msr) {
- return RESUME_GUEST;
- }
-
-#ifdef DEBUG_EXT
- printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
-#endif
-
- current->thread.regs->msr |= msr;
-
- switch (msr) {
- case MSR_FP:
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
- thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-
- t->fpscr.val = vcpu->arch.fpscr;
- t->fpexc_mode = 0;
- kvmppc_load_up_fpu();
- break;
- case MSR_VEC:
-#ifdef CONFIG_ALTIVEC
- memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
- t->vscr = vcpu->arch.vscr;
- t->vrsave = -1;
- kvmppc_load_up_altivec();
-#endif
- break;
- case MSR_VSX:
-#ifdef CONFIG_VSX
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
- thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
- kvmppc_load_up_vsx();
-#endif
- break;
- default:
- BUG();
- }
-
- vcpu->arch.guest_owned_ext |= msr;
-
- kvmppc_recalc_shadow_msr(vcpu);
-
- return RESUME_GUEST;
-}
-
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
-{
- int r = RESUME_HOST;
-
- vcpu->stat.sum_exits++;
-
- run->exit_reason = KVM_EXIT_UNKNOWN;
- run->ready_for_interrupt_injection = 1;
-
- trace_kvm_book3s_exit(exit_nr, vcpu);
- kvm_resched(vcpu);
- switch (exit_nr) {
- case BOOK3S_INTERRUPT_INST_STORAGE:
- vcpu->stat.pf_instruc++;
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* We set segments as unused segments when invalidating them. So
- * treat the respective fault as segment fault. */
- if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
- == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
- r = RESUME_GUEST;
- break;
- }
-#endif
-
- /* only care about PTEG not found errors, but leave NX alone */
- if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
- r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
- vcpu->stat.sp_instruc++;
- } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
- (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
- /*
- * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
- * so we can't use the NX bit inside the guest. Let's cross our fingers,
- * that no guest that needs the dcbz hack does NX.
- */
- kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
- r = RESUME_GUEST;
- } else {
- vcpu->arch.shared->msr |=
- to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- case BOOK3S_INTERRUPT_DATA_STORAGE:
- {
- ulong dar = kvmppc_get_fault_dar(vcpu);
- vcpu->stat.pf_storage++;
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* We set segments as unused segments when invalidating them. So
- * treat the respective fault as segment fault. */
- if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
- kvmppc_mmu_map_segment(vcpu, dar);
- r = RESUME_GUEST;
- break;
- }
-#endif
-
- /* The only case we need to handle is missing shadow PTEs */
- if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
- r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
- } else {
- vcpu->arch.shared->dar = dar;
- vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- }
- case BOOK3S_INTERRUPT_DATA_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- kvmppc_book3s_queue_irqprio(vcpu,
- BOOK3S_INTERRUPT_DATA_SEGMENT);
- }
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_INST_SEGMENT:
- if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
- kvmppc_book3s_queue_irqprio(vcpu,
- BOOK3S_INTERRUPT_INST_SEGMENT);
- }
- r = RESUME_GUEST;
- break;
- /* We're good on these - the host merely wanted to get our attention */
- case BOOK3S_INTERRUPT_DECREMENTER:
- vcpu->stat.dec_exits++;
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_EXTERNAL:
- vcpu->stat.ext_intr_exits++;
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_PERFMON:
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_PROGRAM:
- {
- enum emulation_result er;
- ulong flags;
-
-program_interrupt:
- flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
-
- if (vcpu->arch.shared->msr & MSR_PR) {
-#ifdef EXIT_DEBUG
- printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
-#endif
- if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
- (INS_DCBZ & 0xfffffff7)) {
- kvmppc_core_queue_program(vcpu, flags);
- r = RESUME_GUEST;
- break;
- }
- }
-
- vcpu->stat.emulated_inst_exits++;
- er = kvmppc_emulate_instruction(run, vcpu);
- switch (er) {
- case EMULATE_DONE:
- r = RESUME_GUEST_NV;
- break;
- case EMULATE_AGAIN:
- r = RESUME_GUEST;
- break;
- case EMULATE_FAIL:
- printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
- __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
- kvmppc_core_queue_program(vcpu, flags);
- r = RESUME_GUEST;
- break;
- case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
- r = RESUME_HOST_NV;
- break;
- default:
- BUG();
- }
- break;
- }
- case BOOK3S_INTERRUPT_SYSCALL:
- if (vcpu->arch.osi_enabled &&
- (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
- (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
- /* MOL hypercalls */
- u64 *gprs = run->osi.gprs;
- int i;
-
- run->exit_reason = KVM_EXIT_OSI;
- for (i = 0; i < 32; i++)
- gprs[i] = kvmppc_get_gpr(vcpu, i);
- vcpu->arch.osi_needed = 1;
- r = RESUME_HOST_NV;
- } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
- (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
- /* KVM PV hypercalls */
- kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
- r = RESUME_GUEST;
- } else {
- /* Guest syscalls */
- vcpu->stat.syscall_exits++;
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- }
- break;
- case BOOK3S_INTERRUPT_FP_UNAVAIL:
- case BOOK3S_INTERRUPT_ALTIVEC:
- case BOOK3S_INTERRUPT_VSX:
- {
- int ext_msr = 0;
-
- switch (exit_nr) {
- case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
- case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
- case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
- }
-
- switch (kvmppc_check_ext(vcpu, exit_nr)) {
- case EMULATE_DONE:
- /* everything ok - let's enable the ext */
- r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
- break;
- case EMULATE_FAIL:
- /* we need to emulate this instruction */
- goto program_interrupt;
- break;
- default:
- /* nothing to worry about - go again */
- break;
- }
- break;
- }
- case BOOK3S_INTERRUPT_ALIGNMENT:
- if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
- vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
- kvmppc_get_last_inst(vcpu));
- vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
- kvmppc_get_last_inst(vcpu));
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- }
- r = RESUME_GUEST;
- break;
- case BOOK3S_INTERRUPT_MACHINE_CHECK:
- case BOOK3S_INTERRUPT_TRACE:
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- r = RESUME_GUEST;
- break;
- default:
- /* Ugh - bork here! What did we get? */
- printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
- exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
- r = RESUME_HOST;
- BUG();
- break;
- }
-
-
- if (!(r & RESUME_HOST)) {
- /* To avoid clobbering exit_reason, only check for signals if
- * we aren't already exiting to userspace for some other
- * reason. */
- if (signal_pending(current)) {
-#ifdef EXIT_DEBUG
- printk(KERN_EMERG "KVM: Going back to host\n");
-#endif
- vcpu->stat.signal_exits++;
- run->exit_reason = KVM_EXIT_INTR;
- r = -EINTR;
- } else {
- /* In case an interrupt came in that was triggered
- * from userspace (like DEC), we need to check what
- * to inject now! */
- kvmppc_core_deliver_interrupts(vcpu);
- }
- }
-
- trace_kvm_book3s_reenter(r, vcpu);
-
- return r;
-}
-
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
return 0;
@@ -1179,69 +460,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- int i;
-
- sregs->pvr = vcpu->arch.pvr;
-
- sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
- if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
- for (i = 0; i < 64; i++) {
- sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
- sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
- }
- } else {
- for (i = 0; i < 16; i++)
- sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
-
- for (i = 0; i < 8; i++) {
- sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
- sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
- }
- }
-
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
-{
- struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
- int i;
-
- kvmppc_set_pvr(vcpu, sregs->pvr);
-
- vcpu3s->sdr1 = sregs->u.s.sdr1;
- if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
- for (i = 0; i < 64; i++) {
- vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
- sregs->u.s.ppc64.slb[i].slbe);
- }
- } else {
- for (i = 0; i < 16; i++) {
- vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
- }
- for (i = 0; i < 8; i++) {
- kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
- (u32)sregs->u.s.ppc32.ibat[i]);
- kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
- (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
- kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
- (u32)sregs->u.s.ppc32.dbat[i]);
- kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
- (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
- }
- }
-
- /* Flush the MMU after messing with the segments */
- kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
- return 0;
-}
-
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -ENOTSUPP;
@@ -1296,202 +514,3 @@ out:
mutex_unlock(&kvm->slots_lock);
return r;
}
-
-int kvmppc_core_check_processor_compat(void)
-{
- return 0;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
-{
- struct kvmppc_vcpu_book3s *vcpu_book3s;
- struct kvm_vcpu *vcpu;
- int err = -ENOMEM;
- unsigned long p;
-
- vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
- if (!vcpu_book3s)
- goto out;
-
- vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
- kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
- if (!vcpu_book3s->shadow_vcpu)
- goto free_vcpu;
-
- vcpu = &vcpu_book3s->vcpu;
- err = kvm_vcpu_init(vcpu, kvm, id);
- if (err)
- goto free_shadow_vcpu;
-
- p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
- /* the real shared page fills the last 4k of our page */
- vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
- if (!p)
- goto uninit_vcpu;
-
- vcpu->arch.host_retip = kvm_return_point;
- vcpu->arch.host_msr = mfmsr();
-#ifdef CONFIG_PPC_BOOK3S_64
- /* default to book3s_64 (970fx) */
- vcpu->arch.pvr = 0x3C0301;
-#else
- /* default to book3s_32 (750) */
- vcpu->arch.pvr = 0x84202;
-#endif
- kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
- vcpu_book3s->slb_nr = 64;
-
- /* remember where some real-mode handlers are */
- vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
- vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
- vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
-#ifdef CONFIG_PPC_BOOK3S_64
- vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
-#else
- vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
-#endif
-
- vcpu->arch.shadow_msr = MSR_USER64;
-
- err = kvmppc_mmu_init(vcpu);
- if (err < 0)
- goto uninit_vcpu;
-
- return vcpu;
-
-uninit_vcpu:
- kvm_vcpu_uninit(vcpu);
-free_shadow_vcpu:
- kfree(vcpu_book3s->shadow_vcpu);
-free_vcpu:
- vfree(vcpu_book3s);
-out:
- return ERR_PTR(err);
-}
-
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
-
- free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
- kvm_vcpu_uninit(vcpu);
- kfree(vcpu_book3s->shadow_vcpu);
- vfree(vcpu_book3s);
-}
-
-extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
-{
- int ret;
- double fpr[32][TS_FPRWIDTH];
- unsigned int fpscr;
- int fpexc_mode;
-#ifdef CONFIG_ALTIVEC
- vector128 vr[32];
- vector128 vscr;
- unsigned long uninitialized_var(vrsave);
- int used_vr;
-#endif
-#ifdef CONFIG_VSX
- int used_vsr;
-#endif
- ulong ext_msr;
-
- /* No need to go into the guest when all we do is going out */
- if (signal_pending(current)) {
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- /* Save FPU state in stack */
- if (current->thread.regs->msr & MSR_FP)
- giveup_fpu(current);
- memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
- fpscr = current->thread.fpscr.val;
- fpexc_mode = current->thread.fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
- /* Save Altivec state in stack */
- used_vr = current->thread.used_vr;
- if (used_vr) {
- if (current->thread.regs->msr & MSR_VEC)
- giveup_altivec(current);
- memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
- vscr = current->thread.vscr;
- vrsave = current->thread.vrsave;
- }
-#endif
-
-#ifdef CONFIG_VSX
- /* Save VSX state in stack */
- used_vsr = current->thread.used_vsr;
- if (used_vsr && (current->thread.regs->msr & MSR_VSX))
- __giveup_vsx(current);
-#endif
-
- /* Remember the MSR with disabled extensions */
- ext_msr = current->thread.regs->msr;
-
- /* XXX we get called with irq disabled - change that! */
- local_irq_enable();
-
- /* Preload FPU if it's enabled */
- if (vcpu->arch.shared->msr & MSR_FP)
- kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
-
- ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
-
- local_irq_disable();
-
- current->thread.regs->msr = ext_msr;
-
- /* Make sure we save the guest FPU/Altivec/VSX state */
- kvmppc_giveup_ext(vcpu, MSR_FP);
- kvmppc_giveup_ext(vcpu, MSR_VEC);
- kvmppc_giveup_ext(vcpu, MSR_VSX);
-
- /* Restore FPU state from stack */
- memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
- current->thread.fpscr.val = fpscr;
- current->thread.fpexc_mode = fpexc_mode;
-
-#ifdef CONFIG_ALTIVEC
- /* Restore Altivec state from stack */
- if (used_vr && current->thread.used_vr) {
- memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
- current->thread.vscr = vscr;
- current->thread.vrsave = vrsave;
- }
- current->thread.used_vr = used_vr;
-#endif
-
-#ifdef CONFIG_VSX
- current->thread.used_vsr = used_vsr;
-#endif
-
- return ret;
-}
-
-static int kvmppc_book3s_init(void)
-{
- int r;
-
- r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
- THIS_MODULE);
-
- if (r)
- return r;
-
- r = kvmppc_mmu_hpte_sysinit();
-
- return r;
-}
-
-static void kvmppc_book3s_exit(void)
-{
- kvmppc_mmu_hpte_sysexit();
- kvm_exit();
-}
-
-module_init(kvmppc_book3s_init);
-module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index d7889ef3211e..c6d3e194b6b4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -41,36 +41,36 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
}
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
- struct kvmppc_vcpu_book3s *vcpu_book3s,
+ struct kvm_vcpu *vcpu,
gva_t eaddr)
{
int i;
u64 esid = GET_ESID(eaddr);
u64 esid_1t = GET_ESID_1T(eaddr);
- for (i = 0; i < vcpu_book3s->slb_nr; i++) {
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
u64 cmp_esid = esid;
- if (!vcpu_book3s->slb[i].valid)
+ if (!vcpu->arch.slb[i].valid)
continue;
- if (vcpu_book3s->slb[i].tb)
+ if (vcpu->arch.slb[i].tb)
cmp_esid = esid_1t;
- if (vcpu_book3s->slb[i].esid == cmp_esid)
- return &vcpu_book3s->slb[i];
+ if (vcpu->arch.slb[i].esid == cmp_esid)
+ return &vcpu->arch.slb[i];
}
dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
eaddr, esid, esid_1t);
- for (i = 0; i < vcpu_book3s->slb_nr; i++) {
- if (vcpu_book3s->slb[i].vsid)
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ if (vcpu->arch.slb[i].vsid)
dprintk(" %d: %c%c%c %llx %llx\n", i,
- vcpu_book3s->slb[i].valid ? 'v' : ' ',
- vcpu_book3s->slb[i].large ? 'l' : ' ',
- vcpu_book3s->slb[i].tb ? 't' : ' ',
- vcpu_book3s->slb[i].esid,
- vcpu_book3s->slb[i].vsid);
+ vcpu->arch.slb[i].valid ? 'v' : ' ',
+ vcpu->arch.slb[i].large ? 'l' : ' ',
+ vcpu->arch.slb[i].tb ? 't' : ' ',
+ vcpu->arch.slb[i].esid,
+ vcpu->arch.slb[i].vsid);
}
return NULL;
@@ -81,7 +81,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
{
struct kvmppc_slb *slb;
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
+ slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
if (!slb)
return 0;
@@ -180,7 +180,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return 0;
}
- slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
+ slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
if (!slbe)
goto no_seg_found;
@@ -320,10 +320,10 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
esid_1t = GET_ESID_1T(rb);
slb_nr = rb & 0xfff;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
@@ -344,38 +344,35 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return 0;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
return slbe->orige;
}
static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
- if (slb_nr > vcpu_book3s->slb_nr)
+ if (slb_nr > vcpu->arch.slb_nr)
return 0;
- slbe = &vcpu_book3s->slb[slb_nr];
+ slbe = &vcpu->arch.slb[slb_nr];
return slbe->origv;
}
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
struct kvmppc_slb *slbe;
dprintk("KVM MMU: slbie(0x%llx)\n", ea);
- slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);
+ slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (!slbe)
return;
@@ -389,13 +386,12 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
int i;
dprintk("KVM MMU: slbia()\n");
- for (i = 1; i < vcpu_book3s->slb_nr; i++)
- vcpu_book3s->slb[i].valid = false;
+ for (i = 1; i < vcpu->arch.slb_nr; i++)
+ vcpu->arch.slb[i].valid = false;
if (vcpu->arch.shared->msr & MSR_IR) {
kvmppc_mmu_flush_segments(vcpu);
@@ -464,7 +460,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
ulong mp_ea = vcpu->arch.magic_page_ea;
if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+ slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (slb)
gvsid = slb->vsid;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
new file mode 100644
index 000000000000..bc3a2ea94217
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -0,0 +1,180 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/cputable.h>
+
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER 24
+#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
+#define HPT_HASH_MASK (HPT_NPTEG - 1)
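For reference, the sizes these constants imply (a worked check, not part of the patch):

/* 1UL << HPT_ORDER              = 16 MB hash table                  */
/* one PTEG                      = 128 bytes = 8 HPTEs of two u64s   */
/* HPT_NPTEG = 1UL << (24 - 7)   = 131072 PTEGs                      */
/* HPT_HASH_MASK                 = 0x1ffff                           */
/* kvmppc_map_vrma() below indexes a PTEG as hpt_virt + (hash << 7)  */
/* and picks entry 7 inside it with hpte += 7 * 2.                   */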
+
+/* Pages in the VRMA are 16MB pages */
+#define VRMA_PAGE_ORDER 24
+#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
+
+/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
+#define MAX_LPID_970 63
+#define NR_LPIDS (LPID_RSVD + 1)
+unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
+
+long kvmppc_alloc_hpt(struct kvm *kvm)
+{
+ unsigned long hpt;
+ unsigned long lpid;
+
+ hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
+ HPT_ORDER - PAGE_SHIFT);
+ if (!hpt) {
+ pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
+ return -ENOMEM;
+ }
+ kvm->arch.hpt_virt = hpt;
+
+ do {
+ lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
+ if (lpid >= NR_LPIDS) {
+ pr_err("kvm_alloc_hpt: No LPIDs free\n");
+ free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+ return -ENOMEM;
+ }
+ } while (test_and_set_bit(lpid, lpid_inuse));
+
+ kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
+ kvm->arch.lpid = lpid;
+
+ pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
+ return 0;
+}
+
+void kvmppc_free_hpt(struct kvm *kvm)
+{
+ clear_bit(kvm->arch.lpid, lpid_inuse);
+ free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+}
+
+void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
+{
+ unsigned long i;
+ unsigned long npages = kvm->arch.ram_npages;
+ unsigned long pfn;
+ unsigned long *hpte;
+ unsigned long hash;
+ struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;
+
+ if (!pginfo)
+ return;
+
+ /* VRMA can't be > 1TB */
+ if (npages > 1ul << (40 - kvm->arch.ram_porder))
+ npages = 1ul << (40 - kvm->arch.ram_porder);
+ /* Can't use more than 1 HPTE per HPTEG */
+ if (npages > HPT_NPTEG)
+ npages = HPT_NPTEG;
+
+ for (i = 0; i < npages; ++i) {
+ pfn = pginfo[i].pfn;
+ if (!pfn)
+ break;
+ /* can't use hpt_hash since va > 64 bits */
+ hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
+ /*
+ * We assume that the hash table is empty and no
+ * vcpus are using it at this stage. Since we create
+ * at most one HPTE per HPTEG, we just assume entry 7
+ * is available and use it.
+ */
+ hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
+ hpte += 7 * 2;
+ /* HPTE low word - RPN, protection, etc. */
+ hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
+ HPTE_R_M | PP_RWXX;
+ wmb();
+ hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
+ (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
+ HPTE_V_LARGE | HPTE_V_VALID;
+ }
+}
+
+int kvmppc_mmu_hv_init(void)
+{
+ unsigned long host_lpid, rsvd_lpid;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return -EINVAL;
+
+ memset(lpid_inuse, 0, sizeof(lpid_inuse));
+
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ host_lpid = mfspr(SPRN_LPID); /* POWER7 */
+ rsvd_lpid = LPID_RSVD;
+ } else {
+ host_lpid = 0; /* PPC970 */
+ rsvd_lpid = MAX_LPID_970;
+ }
+
+ set_bit(host_lpid, lpid_inuse);
+ /* rsvd_lpid is reserved for use in partition switching */
+ set_bit(rsvd_lpid, lpid_inuse);
+
+ return 0;
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+}
+
+static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
+{
+ kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+}
+
+static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, bool data)
+{
+ return -ENOENT;
+}
+
+void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ vcpu->arch.slb_nr = 32; /* POWER7 */
+ else
+ vcpu->arch.slb_nr = 64;
+
+ mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
+ mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
+
+ vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
+}
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
new file mode 100644
index 000000000000..ea0f8c537c28
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -0,0 +1,73 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/list.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/kvm_host.h>
+#include <asm/udbg.h>
+
+#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+
+long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_spapr_tce_table *stt;
+
+ /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
+ /* liobn, ioba, tce); */
+
+ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt->liobn == liobn) {
+ unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
+ struct page *page;
+ u64 *tbl;
+
+ /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p window_size=0x%x\n", */
+ /* liobn, stt, stt->window_size); */
+ if (ioba >= stt->window_size)
+ return H_PARAMETER;
+
+ page = stt->pages[idx / TCES_PER_PAGE];
+ tbl = (u64 *)page_address(page);
+
+ /* FIXME: Need to validate the TCE itself */
+ /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
+ tbl[idx % TCES_PER_PAGE] = tce;
+ return H_SUCCESS;
+ }
+ }
+
+ /* Didn't find the liobn, punt it to userspace */
+ return H_TOO_HARD;
+}
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 1dd5a1ddfd0d..88c8f26add02 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -20,8 +20,11 @@
#include <linux/module.h>
#include <asm/kvm_book3s.h>
-EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
-EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
+#else
+EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter);
+EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_rmcall);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
@@ -30,3 +33,5 @@ EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
#ifdef CONFIG_VSX
EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
#endif
+#endif
+
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
new file mode 100644
index 000000000000..cc0d7f1b19ab
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Paul Mackerras <paulus@au1.ibm.com>
+ * Alexander Graf <agraf@suse.de>
+ * Kevin Wolf <mail@kevin-wolf.de>
+ *
+ * Description: KVM functions specific to running on Book 3S
+ * processors in hypervisor mode (specifically POWER7 and later).
+ *
+ * This file is derived from arch/powerpc/kvm/book3s.c,
+ * by Alexander Graf <agraf@suse.de>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <linux/page-flags.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu_context.h>
+#include <asm/lppaca.h>
+#include <asm/processor.h>
+#include <asm/cputhreads.h>
+#include <asm/page.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+
+/*
+ * For now, limit memory to 64GB and require it to be large pages.
+ * This value is chosen because it makes the ram_pginfo array be
+ * 64kB in size, which is about as large as we want to be trying
+ * to allocate with kmalloc.
+ */
+#define MAX_MEM_ORDER 36
+
+#define LARGE_PAGE_ORDER 24 /* 16MB pages */
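A quick check of the limit chosen here (illustrative; the size of struct kvmppc_pginfo is not shown in this patch and is assumed to be about 16 bytes, which is what the 64kB figure implies):

/* 1UL << MAX_MEM_ORDER                      = 64 GB of guest memory   */
/* 1UL << (MAX_MEM_ORDER - LARGE_PAGE_ORDER) = 4096 tracked 16MB pages */
/* 4096 entries * ~16 bytes                  = ~64 kB ram_pginfo array */
/* -- small enough for the kzalloc() in kvmppc_core_init_vm() below.   */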
+
+/* #define EXIT_DEBUG */
+/* #define EXIT_DEBUG_SIMPLE */
+/* #define EXIT_DEBUG_INT */
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ local_paca->kvm_hstate.kvm_vcpu = vcpu;
+ local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu);
+static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu);
+
+void kvmppc_vcpu_block(struct kvm_vcpu *vcpu)
+{
+ u64 now;
+ unsigned long dec_nsec;
+
+ now = get_tb();
+ if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_queue_dec(vcpu);
+ if (vcpu->arch.pending_exceptions)
+ return;
+ if (vcpu->arch.dec_expires != ~(u64)0) {
+ dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC /
+ tb_ticks_per_sec;
+ hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
+ HRTIMER_MODE_REL);
+ }
+
+ kvmppc_vcpu_blocked(vcpu);
+
+ kvm_vcpu_block(vcpu);
+ vcpu->stat.halt_wakeup++;
+
+ if (vcpu->arch.dec_expires != ~(u64)0)
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+
+ kvmppc_vcpu_unblocked(vcpu);
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+ vcpu->arch.shregs.msr = msr;
+}
+
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+{
+ vcpu->arch.pvr = pvr;
+}
+
+void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
+ pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
+ vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
+ for (r = 0; r < 16; ++r)
+ pr_err("r%2d = %.16lx r%d = %.16lx\n",
+ r, kvmppc_get_gpr(vcpu, r),
+ r+16, kvmppc_get_gpr(vcpu, r+16));
+ pr_err("ctr = %.16lx lr = %.16lx\n",
+ vcpu->arch.ctr, vcpu->arch.lr);
+ pr_err("srr0 = %.16llx srr1 = %.16llx\n",
+ vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
+ pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
+ vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
+ pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
+ vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
+ pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
+ vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
+ pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
+ pr_err("fault dar = %.16lx dsisr = %.8x\n",
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
+ for (r = 0; r < vcpu->arch.slb_max; ++r)
+ pr_err(" ESID = %.16llx VSID = %.16llx\n",
+ vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
+ pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
+ vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
+ vcpu->arch.last_inst);
+}
+
+struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+{
+ int r;
+ struct kvm_vcpu *v, *ret = NULL;
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(r, v, kvm) {
+ if (v->vcpu_id == id) {
+ ret = v;
+ break;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
+{
+ vpa->shared_proc = 1;
+ vpa->yield_count = 1;
+}
+
+static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
+ unsigned long flags,
+ unsigned long vcpuid, unsigned long vpa)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long pg_index, ra, len;
+ unsigned long pg_offset;
+ void *va;
+ struct kvm_vcpu *tvcpu;
+
+ tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
+ if (!tvcpu)
+ return H_PARAMETER;
+
+ flags >>= 63 - 18;
+ flags &= 7;
+ if (flags == 0 || flags == 4)
+ return H_PARAMETER;
+ if (flags < 4) {
+ if (vpa & 0x7f)
+ return H_PARAMETER;
+ /* registering new area; convert logical addr to real */
+ pg_index = vpa >> kvm->arch.ram_porder;
+ pg_offset = vpa & (kvm->arch.ram_psize - 1);
+ if (pg_index >= kvm->arch.ram_npages)
+ return H_PARAMETER;
+ if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
+ return H_PARAMETER;
+ ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
+ ra |= pg_offset;
+ va = __va(ra);
+ if (flags <= 1)
+ len = *(unsigned short *)(va + 4);
+ else
+ len = *(unsigned int *)(va + 4);
+ if (pg_offset + len > kvm->arch.ram_psize)
+ return H_PARAMETER;
+ switch (flags) {
+ case 1: /* register VPA */
+ if (len < 640)
+ return H_PARAMETER;
+ tvcpu->arch.vpa = va;
+ init_vpa(vcpu, va);
+ break;
+ case 2: /* register DTL */
+ if (len < 48)
+ return H_PARAMETER;
+ if (!tvcpu->arch.vpa)
+ return H_RESOURCE;
+ len -= len % 48;
+ tvcpu->arch.dtl = va;
+ tvcpu->arch.dtl_end = va + len;
+ break;
+ case 3: /* register SLB shadow buffer */
+ if (len < 8)
+ return H_PARAMETER;
+ if (!tvcpu->arch.vpa)
+ return H_RESOURCE;
+ tvcpu->arch.slb_shadow = va;
+ len = (len - 16) / 16;
+ tvcpu->arch.slb_shadow = va;
+ break;
+ }
+ } else {
+ switch (flags) {
+ case 5: /* unregister VPA */
+ if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
+ return H_RESOURCE;
+ tvcpu->arch.vpa = NULL;
+ break;
+ case 6: /* unregister DTL */
+ tvcpu->arch.dtl = NULL;
+ break;
+ case 7: /* unregister SLB shadow buffer */
+ tvcpu->arch.slb_shadow = NULL;
+ break;
+ }
+ }
+ return H_SUCCESS;
+}
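The two lines at the top of do_h_register_vpa() ("flags >>= 63 - 18; flags &= 7;") pull a 3-bit subfunction out of the high bits of the hcall's flags argument. The same extraction, written out (hypothetical helper name):

/* Subfunction codes: 1/2/3 register the VPA, DTL and SLB shadow buffer,
 * 5/6/7 unregister them; 0 and 4 are rejected with H_PARAMETER. */
static unsigned int h_register_vpa_subfunc(unsigned long flags)
{
	return (flags >> (63 - 18)) & 7;   /* i.e. (flags >> 45) & 7 */
}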
+
+int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+{
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+ unsigned long target, ret = H_SUCCESS;
+ struct kvm_vcpu *tvcpu;
+
+ switch (req) {
+ case H_CEDE:
+ vcpu->arch.shregs.msr |= MSR_EE;
+ vcpu->arch.ceded = 1;
+ smp_mb();
+ if (!vcpu->arch.prodded)
+ kvmppc_vcpu_block(vcpu);
+ else
+ vcpu->arch.prodded = 0;
+ smp_mb();
+ vcpu->arch.ceded = 0;
+ break;
+ case H_PROD:
+ target = kvmppc_get_gpr(vcpu, 4);
+ tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ if (!tvcpu) {
+ ret = H_PARAMETER;
+ break;
+ }
+ tvcpu->arch.prodded = 1;
+ smp_mb();
+ if (vcpu->arch.ceded) {
+ if (waitqueue_active(&vcpu->wq)) {
+ wake_up_interruptible(&vcpu->wq);
+ vcpu->stat.halt_wakeup++;
+ }
+ }
+ break;
+ case H_CONFER:
+ break;
+ case H_REGISTER_VPA:
+ ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ break;
+ default:
+ return RESUME_HOST;
+ }
+ kvmppc_set_gpr(vcpu, 3, ret);
+ vcpu->arch.hcall_needed = 0;
+ return RESUME_GUEST;
+}
+
+static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ struct task_struct *tsk)
+{
+ int r = RESUME_HOST;
+
+ vcpu->stat.sum_exits++;
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+ switch (vcpu->arch.trap) {
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_HV_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PERFMON:
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PROGRAM:
+ {
+ ulong flags;
+ /*
+ * Normally program interrupts are delivered directly
+ * to the guest by the hardware, but we can get here
+ * as a result of a hypervisor emulation interrupt
+ * (e40) getting turned into a 700 by BML RTAS.
+ */
+ flags = vcpu->arch.shregs.msr & 0x1f0000ull;
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ }
+ case BOOK3S_INTERRUPT_SYSCALL:
+ {
+ /* hcall - punt to userspace */
+ int i;
+
+ if (vcpu->arch.shregs.msr & MSR_PR) {
+ /* sc 1 from userspace - reflect to guest syscall */
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
+ r = RESUME_GUEST;
+ break;
+ }
+ run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
+ for (i = 0; i < 9; ++i)
+ run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
+ run->exit_reason = KVM_EXIT_PAPR_HCALL;
+ vcpu->arch.hcall_needed = 1;
+ r = RESUME_HOST;
+ break;
+ }
+ /*
+ * We get these next two if the guest does a bad real-mode access,
+ * as we have enabled VRMA (virtualized real mode area) mode in the
+ * LPCR. We just generate an appropriate DSI/ISI to the guest.
+ */
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE:
+ vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
+ vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
+ 0x08000000);
+ r = RESUME_GUEST;
+ break;
+ /*
+ * This occurs if the guest executes an illegal instruction.
+ * We just generate a program interrupt to the guest, since
+ * we don't emulate any guest instructions at this stage.
+ */
+ case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
+ kvmppc_core_queue_program(vcpu, 0x80000);
+ r = RESUME_GUEST;
+ break;
+ default:
+ kvmppc_dump_regs(vcpu);
+ printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+ vcpu->arch.trap, kvmppc_get_pc(vcpu),
+ vcpu->arch.shregs.msr);
+ r = RESUME_HOST;
+ BUG();
+ break;
+ }
+
+
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(tsk)) {
+ vcpu->stat.signal_exits++;
+ run->exit_reason = KVM_EXIT_INTR;
+ r = -EINTR;
+ } else {
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ }
+
+ return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ int i;
+
+ sregs->pvr = vcpu->arch.pvr;
+
+ memset(sregs, 0, sizeof(struct kvm_sregs));
+ for (i = 0; i < vcpu->arch.slb_max; i++) {
+ sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
+ sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
+ }
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ int i, j;
+
+ kvmppc_set_pvr(vcpu, sregs->pvr);
+
+ j = 0;
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
+ vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
+ vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
+ ++j;
+ }
+ }
+ vcpu->arch.slb_max = j;
+
+ return 0;
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+ return -EIO;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvm_vcpu *vcpu;
+ int err = -EINVAL;
+ int core;
+ struct kvmppc_vcore *vcore;
+
+ core = id / threads_per_core;
+ if (core >= KVM_MAX_VCORES)
+ goto out;
+
+ err = -ENOMEM;
+ vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+ if (!vcpu)
+ goto out;
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ vcpu->arch.shared = &vcpu->arch.shregs;
+ vcpu->arch.last_cpu = -1;
+ vcpu->arch.mmcr[0] = MMCR0_FC;
+ vcpu->arch.ctrl = CTRL_RUNLATCH;
+ /* default to host PVR, since we can't spoof it */
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+
+ kvmppc_mmu_book3s_hv_init(vcpu);
+
+ /*
+ * Some vcpus may start out in stopped state. If we initialize
+ * them to busy-in-host state they will stop other vcpus in the
+ * vcore from running. Instead we initialize them to blocked
+ * state, effectively considering them to be stopped until we
+ * see the first run ioctl for them.
+ */
+ vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
+
+ init_waitqueue_head(&vcpu->arch.cpu_run);
+
+ mutex_lock(&kvm->lock);
+ vcore = kvm->arch.vcores[core];
+ if (!vcore) {
+ vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
+ if (vcore) {
+ INIT_LIST_HEAD(&vcore->runnable_threads);
+ spin_lock_init(&vcore->lock);
+ }
+ kvm->arch.vcores[core] = vcore;
+ }
+ mutex_unlock(&kvm->lock);
+
+ if (!vcore)
+ goto free_vcpu;
+
+ spin_lock(&vcore->lock);
+ ++vcore->num_threads;
+ ++vcore->n_blocked;
+ spin_unlock(&vcore->lock);
+ vcpu->arch.vcore = vcore;
+
+ return vcpu;
+
+free_vcpu:
+ kfree(vcpu);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_uninit(vcpu);
+ kfree(vcpu);
+}
+
+static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ spin_lock(&vc->lock);
+ vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
+ ++vc->n_blocked;
+ if (vc->n_runnable > 0 &&
+ vc->n_runnable + vc->n_blocked == vc->num_threads) {
+ vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
+ arch.run_list);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+ spin_unlock(&vc->lock);
+}
+
+static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ spin_lock(&vc->lock);
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+ --vc->n_blocked;
+ spin_unlock(&vc->lock);
+}
+
+extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern void xics_wake_cpu(int cpu);
+
+static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
+ struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *v;
+
+ if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
+ return;
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+ --vc->n_runnable;
+ /* decrement the physical thread id of each following vcpu */
+ v = vcpu;
+ list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
+ --v->arch.ptid;
+ list_del(&vcpu->arch.run_list);
+}
+
+static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
+{
+ int cpu;
+ struct paca_struct *tpaca;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ cpu = vc->pcpu + vcpu->arch.ptid;
+ tpaca = &paca[cpu];
+ tpaca->kvm_hstate.kvm_vcpu = vcpu;
+ tpaca->kvm_hstate.kvm_vcore = vc;
+ smp_wmb();
+#ifdef CONFIG_PPC_ICP_NATIVE
+ if (vcpu->arch.ptid) {
+ tpaca->cpu_start = 0x80;
+ tpaca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST;
+ wmb();
+ xics_wake_cpu(cpu);
+ ++vc->n_woken;
+ }
+#endif
+}
+
+static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
+{
+ int i;
+
+ HMT_low();
+ i = 0;
+ while (vc->nap_count < vc->n_woken) {
+ if (++i >= 1000000) {
+ pr_err("kvmppc_wait_for_nap timeout %d %d\n",
+ vc->nap_count, vc->n_woken);
+ break;
+ }
+ cpu_relax();
+ }
+ HMT_medium();
+}
+
+/*
+ * Check that we are on thread 0 and that any other threads in
+ * this core are off-line.
+ */
+static int on_primary_thread(void)
+{
+ int cpu = smp_processor_id();
+ int thr = cpu_thread_in_core(cpu);
+
+ if (thr)
+ return 0;
+ while (++thr < threads_per_core)
+ if (cpu_online(cpu + thr))
+ return 0;
+ return 1;
+}
+
+/*
+ * Run a set of guest threads on a physical core.
+ * Called with vc->lock held.
+ */
+static int kvmppc_run_core(struct kvmppc_vcore *vc)
+{
+ struct kvm_vcpu *vcpu, *vnext;
+ long ret;
+ u64 now;
+
+ /* don't start if any threads have a signal pending */
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ if (signal_pending(vcpu->arch.run_task))
+ return 0;
+
+ /*
+ * Make sure we are running on thread 0, and that
+ * secondary threads are offline.
+ * XXX we should also block attempts to bring any
+ * secondary threads online.
+ */
+ if (threads_per_core > 1 && !on_primary_thread()) {
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ vcpu->arch.ret = -EBUSY;
+ goto out;
+ }
+
+ vc->n_woken = 0;
+ vc->nap_count = 0;
+ vc->entry_exit_count = 0;
+ vc->vcore_running = 1;
+ vc->in_guest = 0;
+ vc->pcpu = smp_processor_id();
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ kvmppc_start_thread(vcpu);
+ vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
+ arch.run_list);
+
+ spin_unlock(&vc->lock);
+
+ preempt_disable();
+ kvm_guest_enter();
+ __kvmppc_vcore_entry(NULL, vcpu);
+
+ /* wait for secondary threads to finish writing their state to memory */
+ spin_lock(&vc->lock);
+ if (vc->nap_count < vc->n_woken)
+ kvmppc_wait_for_nap(vc);
+ /* prevent other vcpu threads from doing kvmppc_start_thread() now */
+ vc->vcore_running = 2;
+ spin_unlock(&vc->lock);
+
+ /* make sure updates to secondary vcpu structs are visible now */
+ smp_mb();
+ kvm_guest_exit();
+
+ preempt_enable();
+ kvm_resched(vcpu);
+
+ now = get_tb();
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+ /* cancel pending dec exception if dec is positive */
+ if (now < vcpu->arch.dec_expires &&
+ kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_dequeue_dec(vcpu);
+ if (!vcpu->arch.trap) {
+ if (signal_pending(vcpu->arch.run_task)) {
+ vcpu->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ }
+ continue; /* didn't get to run */
+ }
+ ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
+ vcpu->arch.run_task);
+ vcpu->arch.ret = ret;
+ vcpu->arch.trap = 0;
+ }
+
+ spin_lock(&vc->lock);
+ out:
+ vc->vcore_running = 0;
+ list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+ arch.run_list) {
+ if (vcpu->arch.ret != RESUME_GUEST) {
+ kvmppc_remove_runnable(vc, vcpu);
+ wake_up(&vcpu->arch.cpu_run);
+ }
+ }
+
+ return 1;
+}
+
+static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ptid;
+ int wait_state;
+ struct kvmppc_vcore *vc;
+ DEFINE_WAIT(wait);
+
+ /* No need to go into the guest when all we do is going out */
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+ /* On PPC970, check that we have an RMA region */
+ if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201))
+ return -EPERM;
+
+ kvm_run->exit_reason = 0;
+ vcpu->arch.ret = RESUME_GUEST;
+ vcpu->arch.trap = 0;
+
+ flush_fp_to_thread(current);
+ flush_altivec_to_thread(current);
+ flush_vsx_to_thread(current);
+
+ /*
+ * Synchronize with other threads in this virtual core
+ */
+ vc = vcpu->arch.vcore;
+ spin_lock(&vc->lock);
+ /* This happens the first time this is called for a vcpu */
+ if (vcpu->arch.state == KVMPPC_VCPU_BLOCKED)
+ --vc->n_blocked;
+ vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
+ ptid = vc->n_runnable;
+ vcpu->arch.run_task = current;
+ vcpu->arch.kvm_run = kvm_run;
+ vcpu->arch.ptid = ptid;
+ list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
+ ++vc->n_runnable;
+
+ wait_state = TASK_INTERRUPTIBLE;
+ while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
+ if (signal_pending(current)) {
+ if (!vc->vcore_running) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ break;
+ }
+ /* have to wait for vcore to stop executing guest */
+ wait_state = TASK_UNINTERRUPTIBLE;
+ smp_send_reschedule(vc->pcpu);
+ }
+
+ if (!vc->vcore_running &&
+ vc->n_runnable + vc->n_blocked == vc->num_threads) {
+ /* we can run now */
+ if (kvmppc_run_core(vc))
+ continue;
+ }
+
+ if (vc->vcore_running == 1 && VCORE_EXIT_COUNT(vc) == 0)
+ kvmppc_start_thread(vcpu);
+
+ /* wait for other threads to come in, or wait for vcore */
+ prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
+ spin_unlock(&vc->lock);
+ schedule();
+ finish_wait(&vcpu->arch.cpu_run, &wait);
+ spin_lock(&vc->lock);
+ }
+
+ if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
+ kvmppc_remove_runnable(vc, vcpu);
+ spin_unlock(&vc->lock);
+
+ return vcpu->arch.ret;
+}
+
+int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ do {
+ r = kvmppc_run_vcpu(run, vcpu);
+
+ if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
+ !(vcpu->arch.shregs.msr & MSR_PR)) {
+ r = kvmppc_pseries_do_hcall(vcpu);
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ } while (r == RESUME_GUEST);
+ return r;
+}
+
+static long kvmppc_stt_npages(unsigned long window_size)
+{
+ return ALIGN((window_size >> SPAPR_TCE_SHIFT)
+ * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+}
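A worked example of the page count computed above (SPAPR_TCE_SHIFT and PAGE_SIZE come from headers not shown here; 12 and 4 kB are assumed for the arithmetic):

/* window_size = 256 MB                                       */
/* TCE entries = 256 MB >> 12         = 65536                 */
/* table bytes = 65536 * sizeof(u64)  = 512 kB                */
/* npages      = 512 kB / 4 kB        = 128 backing pages,    */
/* each handed to userspace on demand by kvm_spapr_tce_fault. */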
+
+static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+{
+ struct kvm *kvm = stt->kvm;
+ int i;
+
+ mutex_lock(&kvm->lock);
+ list_del(&stt->list);
+ for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+ __free_page(stt->pages[i]);
+ kfree(stt);
+ mutex_unlock(&kvm->lock);
+
+ kvm_put_kvm(kvm);
+}
+
+static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+ struct page *page;
+
+ if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+ return VM_FAULT_SIGBUS;
+
+ page = stt->pages[vmf->pgoff];
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
+ .fault = kvm_spapr_tce_fault,
+};
+
+static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &kvm_spapr_tce_vm_ops;
+ return 0;
+}
+
+static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+{
+ struct kvmppc_spapr_tce_table *stt = filp->private_data;
+
+ release_spapr_tce_table(stt);
+ return 0;
+}
+
+static struct file_operations kvm_spapr_tce_fops = {
+ .mmap = kvm_spapr_tce_mmap,
+ .release = kvm_spapr_tce_release,
+};
+
+long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args)
+{
+ struct kvmppc_spapr_tce_table *stt = NULL;
+ long npages;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Check this LIOBN hasn't been previously allocated */
+ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt->liobn == args->liobn)
+ return -EBUSY;
+ }
+
+ npages = kvmppc_stt_npages(args->window_size);
+
+ stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!stt)
+ goto fail;
+
+ stt->liobn = args->liobn;
+ stt->window_size = args->window_size;
+ stt->kvm = kvm;
+
+ for (i = 0; i < npages; i++) {
+ stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!stt->pages[i])
+ goto fail;
+ }
+
+ kvm_get_kvm(kvm);
+
+ mutex_lock(&kvm->lock);
+ list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+
+ mutex_unlock(&kvm->lock);
+
+ return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+ stt, O_RDWR);
+
+fail:
+ if (stt) {
+ for (i = 0; i < npages; i++)
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
+
+ kfree(stt);
+ }
+ return ret;
+}
+
+/* Work out RMLS (real mode limit selector) field value for a given RMA size.
+ Assumes POWER7 or PPC970. */
+static inline int lpcr_rmls(unsigned long rma_size)
+{
+ switch (rma_size) {
+ case 32ul << 20: /* 32 MB */
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ return 8; /* only supported on POWER7 */
+ return -1;
+ case 64ul << 20: /* 64 MB */
+ return 3;
+ case 128ul << 20: /* 128 MB */
+ return 7;
+ case 256ul << 20: /* 256 MB */
+ return 4;
+ case 1ul << 30: /* 1 GB */
+ return 2;
+ case 16ul << 30: /* 16 GB */
+ return 1;
+ case 256ul << 30: /* 256 GB */
+ return 0;
+ default:
+ return -1;
+ }
+}
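A short usage note for lpcr_rmls() (based on kvmppc_core_prepare_memory_region() further down):

long rmls = lpcr_rmls(128ul << 20);   /* 128 MB RMA -> RMLS 7 */
/* POWER7: the value is shifted into LPCR at LPCR_RMLS_SH;        */
/* PPC970: it is split across HID4_RMLS0_SH and HID4_RMLS2_SH.    */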
+
+static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kvmppc_rma_info *ri = vma->vm_file->private_data;
+ struct page *page;
+
+ if (vmf->pgoff >= ri->npages)
+ return VM_FAULT_SIGBUS;
+
+ page = pfn_to_page(ri->base_pfn + vmf->pgoff);
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct kvm_rma_vm_ops = {
+ .fault = kvm_rma_fault,
+};
+
+static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &kvm_rma_vm_ops;
+ return 0;
+}
+
+static int kvm_rma_release(struct inode *inode, struct file *filp)
+{
+ struct kvmppc_rma_info *ri = filp->private_data;
+
+ kvm_release_rma(ri);
+ return 0;
+}
+
+static struct file_operations kvm_rma_fops = {
+ .mmap = kvm_rma_mmap,
+ .release = kvm_rma_release,
+};
+
+long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
+{
+ struct kvmppc_rma_info *ri;
+ long fd;
+
+ ri = kvm_alloc_rma();
+ if (!ri)
+ return -ENOMEM;
+
+ fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
+ if (fd < 0)
+ kvm_release_rma(ri);
+
+ ret->rma_size = ri->npages << PAGE_SHIFT;
+ return fd;
+}
+
+static struct page *hva_to_page(unsigned long addr)
+{
+ struct page *page[1];
+ int npages;
+
+ might_sleep();
+
+ npages = get_user_pages_fast(addr, 1, 1, page);
+
+ if (unlikely(npages != 1))
+ return 0;
+
+ return page[0];
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ unsigned long psize, porder;
+ unsigned long i, npages, totalpages;
+ unsigned long pg_ix;
+ struct kvmppc_pginfo *pginfo;
+ unsigned long hva;
+ struct kvmppc_rma_info *ri = NULL;
+ struct page *page;
+
+ /* For now, only allow 16MB pages */
+ porder = LARGE_PAGE_ORDER;
+ psize = 1ul << porder;
+ if ((mem->memory_size & (psize - 1)) ||
+ (mem->guest_phys_addr & (psize - 1))) {
+ pr_err("bad memory_size=%llx @ %llx\n",
+ mem->memory_size, mem->guest_phys_addr);
+ return -EINVAL;
+ }
+
+ npages = mem->memory_size >> porder;
+ totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder;
+
+ /* More memory than we have space to track? */
+ if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER)))
+ return -EINVAL;
+
+ /* Do we already have an RMA registered? */
+ if (mem->guest_phys_addr == 0 && kvm->arch.rma)
+ return -EINVAL;
+
+ if (totalpages > kvm->arch.ram_npages)
+ kvm->arch.ram_npages = totalpages;
+
+ /* Is this one of our preallocated RMAs? */
+ if (mem->guest_phys_addr == 0) {
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, mem->userspace_addr);
+ if (vma && vma->vm_file &&
+ vma->vm_file->f_op == &kvm_rma_fops &&
+ mem->userspace_addr == vma->vm_start)
+ ri = vma->vm_file->private_data;
+ up_read(&current->mm->mmap_sem);
+ if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) {
+ pr_err("CPU requires an RMO\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ri) {
+ unsigned long rma_size;
+ unsigned long lpcr;
+ long rmls;
+
+ rma_size = ri->npages << PAGE_SHIFT;
+ if (rma_size > mem->memory_size)
+ rma_size = mem->memory_size;
+ rmls = lpcr_rmls(rma_size);
+ if (rmls < 0) {
+ pr_err("Can't use RMA of 0x%lx bytes\n", rma_size);
+ return -EINVAL;
+ }
+ atomic_inc(&ri->use_count);
+ kvm->arch.rma = ri;
+ kvm->arch.n_rma_pages = rma_size >> porder;
+
+ /* Update LPCR and RMOR */
+ lpcr = kvm->arch.lpcr;
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ /* PPC970; insert RMLS value (split field) in HID4 */
+ lpcr &= ~((1ul << HID4_RMLS0_SH) |
+ (3ul << HID4_RMLS2_SH));
+ lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+ ((rmls & 3) << HID4_RMLS2_SH);
+ /* RMOR is also in HID4 */
+ lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
+ << HID4_RMOR_SH;
+ } else {
+ /* POWER7 */
+ lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
+ lpcr |= rmls << LPCR_RMLS_SH;
+ kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+ }
+ kvm->arch.lpcr = lpcr;
+ pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
+ ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
+ }
+
+ pg_ix = mem->guest_phys_addr >> porder;
+ pginfo = kvm->arch.ram_pginfo + pg_ix;
+ for (i = 0; i < npages; ++i, ++pg_ix) {
+ if (ri && pg_ix < kvm->arch.n_rma_pages) {
+ pginfo[i].pfn = ri->base_pfn +
+ (pg_ix << (porder - PAGE_SHIFT));
+ continue;
+ }
+ hva = mem->userspace_addr + (i << porder);
+ page = hva_to_page(hva);
+ if (!page) {
+ pr_err("oops, no pfn for hva %lx\n", hva);
+ goto err;
+ }
+ /* Check it's a 16MB page */
+ if (!PageHead(page) ||
+ compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) {
+ pr_err("page at %lx isn't 16MB (o=%d)\n",
+ hva, compound_order(page));
+ goto err;
+ }
+ pginfo[i].pfn = page_to_pfn(page);
+ }
+
+ return 0;
+
+ err:
+ return -EINVAL;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ if (mem->guest_phys_addr == 0 && mem->memory_size != 0 &&
+ !kvm->arch.rma)
+ kvmppc_map_vrma(kvm, mem);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ long r;
+ unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER);
+ long err = -ENOMEM;
+ unsigned long lpcr;
+
+ /* Allocate hashed page table */
+ r = kvmppc_alloc_hpt(kvm);
+ if (r)
+ return r;
+
+ INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+
+ kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo),
+ GFP_KERNEL);
+ if (!kvm->arch.ram_pginfo) {
+ pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n",
+ npages * sizeof(struct kvmppc_pginfo));
+ goto out_free;
+ }
+
+ kvm->arch.ram_npages = 0;
+ kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER;
+ kvm->arch.ram_porder = LARGE_PAGE_ORDER;
+ kvm->arch.rma = NULL;
+ kvm->arch.n_rma_pages = 0;
+
+ kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ /* PPC970; HID4 is effectively the LPCR */
+ unsigned long lpid = kvm->arch.lpid;
+ kvm->arch.host_lpid = 0;
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
+ lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
+ lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
+ ((lpid & 0xf) << HID4_LPID5_SH);
+ } else {
+ /* POWER7; init LPCR for virtual RMA mode */
+ kvm->arch.host_lpid = mfspr(SPRN_LPID);
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+ lpcr &= LPCR_PECE | LPCR_LPES;
+ lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+ LPCR_VPM0 | LPCR_VRMA_L;
+ }
+ kvm->arch.lpcr = lpcr;
+
+ return 0;
+
+ out_free:
+ kvmppc_free_hpt(kvm);
+ return err;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+ struct kvmppc_pginfo *pginfo;
+ unsigned long i;
+
+ if (kvm->arch.ram_pginfo) {
+ pginfo = kvm->arch.ram_pginfo;
+ kvm->arch.ram_pginfo = NULL;
+ for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i)
+ if (pginfo[i].pfn)
+ put_page(pfn_to_page(pginfo[i].pfn));
+ kfree(pginfo);
+ }
+ if (kvm->arch.rma) {
+ kvm_release_rma(kvm->arch.rma);
+ kvm->arch.rma = NULL;
+ }
+
+ kvmppc_free_hpt(kvm);
+ WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+}
+
+/* These are stubs for now */
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+{
+}
+
+/* We don't need to emulate any privileged instructions or dcbz */
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
+{
+ return EMULATE_FAIL;
+}
+
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+ return EMULATE_FAIL;
+}
+
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+ return EMULATE_FAIL;
+}
+
+static int kvmppc_book3s_hv_init(void)
+{
+ int r;
+
+ r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+ if (r)
+ return r;
+
+ r = kvmppc_mmu_hv_init();
+
+ return r;
+}
+
+static void kvmppc_book3s_hv_exit(void)
+{
+ kvm_exit();
+}
+
+module_init(kvmppc_book3s_hv_init);
+module_exit(kvmppc_book3s_hv_exit);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
new file mode 100644
index 000000000000..d43120355eec
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/bootmem.h>
+#include <linux/init.h>
+
+#include <asm/cputable.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+
+/*
+ * This maintains a list of RMAs (real mode areas) for KVM guests to use.
+ * Each RMA has to be physically contiguous and of a size that the
+ * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
+ * and other larger sizes. Since we are unlikely to be able to allocate that
+ * much physically contiguous memory after the system is up and running,
+ * we preallocate a set of RMAs in early boot for KVM to use.
+ */
+static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
+static unsigned long kvm_rma_count;
+
+static int __init early_parse_rma_size(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_rma_size = memparse(p, &p);
+
+ return 0;
+}
+early_param("kvm_rma_size", early_parse_rma_size);
+
+static int __init early_parse_rma_count(char *p)
+{
+ if (!p)
+ return 1;
+
+ kvm_rma_count = simple_strtoul(p, NULL, 0);
+
+ return 0;
+}
+early_param("kvm_rma_count", early_parse_rma_count);
+
+static struct kvmppc_rma_info *rma_info;
+static LIST_HEAD(free_rmas);
+static DEFINE_SPINLOCK(rma_lock);
+
+/* Work out RMLS (real mode limit selector) field value for a given RMA size.
+ Assumes POWER7 or PPC970. */
+static inline int lpcr_rmls(unsigned long rma_size)
+{
+ switch (rma_size) {
+ case 32ul << 20: /* 32 MB */
+ if (cpu_has_feature(CPU_FTR_ARCH_206))
+ return 8; /* only supported on POWER7 */
+ return -1;
+ case 64ul << 20: /* 64 MB */
+ return 3;
+ case 128ul << 20: /* 128 MB */
+ return 7;
+ case 256ul << 20: /* 256 MB */
+ return 4;
+ case 1ul << 30: /* 1 GB */
+ return 2;
+ case 16ul << 30: /* 16 GB */
+ return 1;
+ case 256ul << 30: /* 256 GB */
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/*
+ * Called at boot time while the bootmem allocator is active,
+ * to allocate contiguous physical memory for the real memory
+ * areas for guests.
+ */
+void kvm_rma_init(void)
+{
+ unsigned long i;
+ unsigned long j, npages;
+ void *rma;
+ struct page *pg;
+
+ /* Only do this on PPC970 in HV mode */
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_201))
+ return;
+
+ if (!kvm_rma_size || !kvm_rma_count)
+ return;
+
+ /* Check that the requested size is one supported in hardware */
+ if (lpcr_rmls(kvm_rma_size) < 0) {
+ pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+ return;
+ }
+
+ npages = kvm_rma_size >> PAGE_SHIFT;
+ rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info));
+ for (i = 0; i < kvm_rma_count; ++i) {
+ rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
+ pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
+ kvm_rma_size >> 20);
+ rma_info[i].base_virt = rma;
+ rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
+ rma_info[i].npages = npages;
+ list_add_tail(&rma_info[i].list, &free_rmas);
+ atomic_set(&rma_info[i].use_count, 0);
+
+ pg = pfn_to_page(rma_info[i].base_pfn);
+ for (j = 0; j < npages; ++j) {
+ atomic_inc(&pg->_count);
+ ++pg;
+ }
+ }
+}
+
+struct kvmppc_rma_info *kvm_alloc_rma(void)
+{
+ struct kvmppc_rma_info *ri;
+
+ ri = NULL;
+ spin_lock(&rma_lock);
+ if (!list_empty(&free_rmas)) {
+ ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list);
+ list_del(&ri->list);
+ atomic_inc(&ri->use_count);
+ }
+ spin_unlock(&rma_lock);
+ return ri;
+}
+EXPORT_SYMBOL_GPL(kvm_alloc_rma);
+
+void kvm_release_rma(struct kvmppc_rma_info *ri)
+{
+ if (atomic_dec_and_test(&ri->use_count)) {
+ spin_lock(&rma_lock);
+ list_add_tail(&ri->list, &free_rmas);
+ spin_unlock(&rma_lock);
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_release_rma);
+
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
new file mode 100644
index 000000000000..3f7b674dd4bf
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -0,0 +1,166 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * Derived from book3s_interrupts.S, which is:
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+#include <asm/ppc-opcode.h>
+
+/*****************************************************************************
+ * *
+ * Guest entry / exit code that is in kernel module memory (vmalloc) *
+ * *
+ ****************************************************************************/
+
+/* Registers:
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcore_entry)
+
+ /* Write correct stack frame */
+ mflr r0
+ std r0,PPC_LR_STKOFF(r1)
+
+ /* Save host state to the stack */
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+
+ /* Save non-volatile registers (r14 - r31) */
+ SAVE_NVGPRS(r1)
+
+ /* Save host DSCR */
+BEGIN_FTR_SECTION
+ mfspr r3, SPRN_DSCR
+ std r3, HSTATE_DSCR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save host DABR */
+ mfspr r3, SPRN_DABR
+ std r3, HSTATE_DABR(r13)
+
+ /* Hard-disable interrupts */
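+ /* (the rldicl/rotldi pair rotates MSR[EE] down to bit 0, clears it,
+ and rotates back; EE is the only bit changed) */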
+ mfmsr r10
+ std r10, HSTATE_HOST_MSR(r13)
+ rldicl r10,r10,48,1
+ rotldi r10,r10,16
+ mtmsrd r10,1
+
+ /* Save host PMU registers and load guest PMU registers */
+ /* R4 is live here (vcpu pointer) but not r3 or r5 */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r7, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
+ isync
+ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
+ lbz r5, LPPACA_PMCINUSE(r3)
+ cmpwi r5, 0
+ beq 31f /* skip if not */
+ mfspr r5, SPRN_MMCR1
+ mfspr r6, SPRN_MMCRA
+ std r7, HSTATE_MMCR(r13)
+ std r5, HSTATE_MMCR + 8(r13)
+ std r6, HSTATE_MMCR + 16(r13)
+ mfspr r3, SPRN_PMC1
+ mfspr r5, SPRN_PMC2
+ mfspr r6, SPRN_PMC3
+ mfspr r7, SPRN_PMC4
+ mfspr r8, SPRN_PMC5
+ mfspr r9, SPRN_PMC6
+BEGIN_FTR_SECTION
+ mfspr r10, SPRN_PMC7
+ mfspr r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ stw r3, HSTATE_PMC(r13)
+ stw r5, HSTATE_PMC + 4(r13)
+ stw r6, HSTATE_PMC + 8(r13)
+ stw r7, HSTATE_PMC + 12(r13)
+ stw r8, HSTATE_PMC + 16(r13)
+ stw r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+ stw r10, HSTATE_PMC + 24(r13)
+ stw r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+31:
+
+ /*
+ * Put whatever is in the decrementer into the
+ * hypervisor decrementer.
+ */
+ mfspr r8,SPRN_DEC
+ mftb r7
+ mtspr SPRN_HDEC,r8
+ extsw r8,r8
+ add r8,r8,r7
+ std r8,HSTATE_DECEXP(r13)
+
+ /*
+ * On PPC970, if the guest vcpu has an external interrupt pending,
+ * send ourselves an IPI so as to interrupt the guest once it
+ * enables interrupts. (It must have interrupts disabled,
+ * otherwise we would already have delivered the interrupt.)
+ */
+BEGIN_FTR_SECTION
+ ld r0, VCPU_PENDING_EXC(r4)
+ li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
+ oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+ and. r0, r0, r7
+ beq 32f
+ mr r31, r4
+ lhz r3, PACAPACAINDEX(r13)
+ bl smp_send_reschedule
+ nop
+ mr r4, r31
+32:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+
+ /* Jump to partition switch code */
+ bl .kvmppc_hv_entry_trampoline
+ nop
+
+/*
+ * We return here in virtual mode after the guest exits
+ * with something that we can't handle in real mode.
+ * Interrupts are enabled again at this point.
+ */
+
+.global kvmppc_handler_highmem
+kvmppc_handler_highmem:
+
+ /*
+ * Register usage at this point:
+ *
+ * R1 = host R1
+ * R2 = host R2
+ * R12 = exit handler id
+ * R13 = PACA
+ */
+
+ /* Restore non-volatile host registers (r14 - r31) */
+ REST_NVGPRS(r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
new file mode 100644
index 000000000000..fcfe6b055558
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -0,0 +1,370 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/hugetlb.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER 24
+#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
+#define HPT_HASH_MASK (HPT_NPTEG - 1)
+
+#define HPTE_V_HVLOCK 0x40UL
+
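+/*
+ * Atomically set HPTE_V_HVLOCK in the first doubleword of an HPTE,
+ * failing if any of the bits in 'bits' are already set.
+ * Returns non-zero if the lock was acquired.
+ */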
+static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
+{
+ unsigned long tmp, old;
+
+ asm volatile(" ldarx %0,0,%2\n"
+ " and. %1,%0,%3\n"
+ " bne 2f\n"
+ " ori %0,%0,%4\n"
+ " stdcx. %0,0,%2\n"
+ " beq+ 2f\n"
+ " li %1,%3\n"
+ "2: isync"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+ : "cc", "memory");
+ return old == 0;
+}
+
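+/*
+ * Real-mode handler for H_ENTER: check the page size and WIMG bits,
+ * translate the guest logical page number to a real address, and
+ * insert the HPTE into the guest hashed page table.
+ */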
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel)
+{
+ unsigned long porder;
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long i, lpn, pa;
+ unsigned long *hpte;
+
+ /* only handle 4k, 64k and 16M pages for now */
+ porder = 12;
+ if (pteh & HPTE_V_LARGE) {
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (ptel & 0xf000) == 0x1000) {
+ /* 64k page */
+ porder = 16;
+ } else if ((ptel & 0xff000) == 0) {
+ /* 16M page */
+ porder = 24;
+ /* lowest AVA bit must be 0 for 16M pages */
+ if (pteh & 0x80)
+ return H_PARAMETER;
+ } else
+ return H_PARAMETER;
+ }
+ lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder;
+ if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder)
+ return H_PARAMETER;
+ pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
+ if (!pa)
+ return H_PARAMETER;
+ /* Check WIMG */
+ if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
+ (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
+ return H_PARAMETER;
+ pteh &= ~0x60UL;
+ ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
+ ptel |= pa;
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ if (likely((flags & H_EXACT) == 0)) {
+ pte_index &= ~7UL;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ for (i = 0; ; ++i) {
+ if (i == 8)
+ return H_PTEG_FULL;
+ if ((*hpte & HPTE_V_VALID) == 0 &&
+ lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
+ break;
+ hpte += 2;
+ }
+ } else {
+ i = 0;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
+ return H_PTEG_FULL;
+ }
+ hpte[1] = ptel;
+ eieio();
+ hpte[0] = pteh;
+ asm volatile("ptesync" : : : "memory");
+ atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
+ vcpu->arch.gpr[4] = pte_index + i;
+ return H_SUCCESS;
+}
+
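+/*
+ * Build the RB operand for a tlbie from an HPTE: the AVA, the low
+ * VA bits, the L/LP page-size encoding and the B (segment size) field.
+ */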
+static unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ unsigned long pte_index)
+{
+ unsigned long rb, va_low;
+
+ rb = (v & ~0x7fUL) << 16; /* AVA field */
+ va_low = pte_index >> 3;
+ if (v & HPTE_V_SECONDARY)
+ va_low = ~va_low;
+ /* xor vsid from AVA */
+ if (!(v & HPTE_V_1TB_SEG))
+ va_low ^= v >> 12;
+ else
+ va_low ^= v >> 24;
+ va_low &= 0x7ff;
+ if (v & HPTE_V_LARGE) {
+ rb |= 1; /* L field */
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ (r & 0xff000)) {
+ /* non-16MB large page, must be 64k */
+ /* (masks depend on page size) */
+ rb |= 0x1000; /* page encoding in LP field */
+ rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+ rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ }
+ } else {
+ /* 4kB page */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
+ }
+ rb |= (v >> 54) & 0x300; /* B field */
+ return rb;
+}
+
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+
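+/*
+ * Try to take a tlbie lock with lwarx/stwcx., storing this CPU's
+ * PACA lock token.  Returns non-zero if the lock was acquired.
+ */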
+static inline int try_lock_tlbie(unsigned int *lock)
+{
+ unsigned int tmp, old;
+ unsigned int token = LOCK_TOKEN;
+
+ asm volatile("1:lwarx %1,0,%2\n"
+ " cmpwi cr0,%1,0\n"
+ " bne 2f\n"
+ " stwcx. %3,0,%2\n"
+ " bne- 1b\n"
+ " isync\n"
+ "2:"
+ : "=&r" (tmp), "=&r" (old)
+ : "r" (lock), "r" (token)
+ : "cc", "memory");
+ return old == 0;
+}
+
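+/*
+ * H_REMOVE: lock and match the target HPTE, clear it, and flush the
+ * old translation with tlbie (or tlbiel when only one vcpu is online).
+ */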
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long va)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte;
+ unsigned long v, r, rb;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ if ((hpte[0] & HPTE_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
+ ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
+ hpte[0] &= ~HPTE_V_HVLOCK;
+ return H_NOT_FOUND;
+ }
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ flags |= H_LOCAL;
+ vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
+ vcpu->arch.gpr[5] = r = hpte[1];
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = 0;
+ if (!(flags & H_LOCAL)) {
+ while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ asm volatile("tlbiel %0" : : "r" (rb));
+ asm volatile("ptesync" : : : "memory");
+ }
+ return H_SUCCESS;
+}
+
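+/*
+ * H_BULK_REMOVE: process up to four request doubleword pairs from
+ * GPRs 4-11.  The top byte of each request encodes the request and
+ * match type, and a return code is written back into the same slot.
+ */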
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *args = &vcpu->arch.gpr[4];
+ unsigned long *hp, tlbrb[4];
+ long int i, found;
+ long int n_inval = 0;
+ unsigned long flags, req, pte_index;
+ long int local = 0;
+ long int ret = H_SUCCESS;
+
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ local = 1;
+ for (i = 0; i < 4; ++i) {
+ pte_index = args[i * 2];
+ flags = pte_index >> 56;
+ pte_index &= ((1ul << 56) - 1);
+ req = flags >> 6;
+ flags &= 3;
+ if (req == 3)
+ break;
+ if (req != 1 || flags == 3 ||
+ pte_index >= (HPT_NPTEG << 3)) {
+ /* parameter error */
+ args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
+ ret = H_PARAMETER;
+ break;
+ }
+ hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hp, HPTE_V_HVLOCK))
+ cpu_relax();
+ found = 0;
+ if (hp[0] & HPTE_V_VALID) {
+ switch (flags & 3) {
+ case 0: /* absolute */
+ found = 1;
+ break;
+ case 1: /* andcond */
+ if (!(hp[0] & args[i * 2 + 1]))
+ found = 1;
+ break;
+ case 2: /* AVPN */
+ if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ hp[0] &= ~HPTE_V_HVLOCK;
+ args[i * 2] = ((0x90 | flags) << 56) + pte_index;
+ continue;
+ }
+ /* insert R and C bits from PTE */
+ flags |= (hp[1] >> 5) & 0x0c;
+ args[i * 2] = ((0x80 | flags) << 56) + pte_index;
+ tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+ hp[0] = 0;
+ }
+ if (n_inval == 0)
+ return ret;
+
+ if (!local) {
+ while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < n_inval; ++i)
+ asm volatile(PPC_TLBIE(%1,%0)
+ : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < n_inval; ++i)
+ asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
+ asm volatile("ptesync" : : : "memory");
+ }
+ return ret;
+}
+
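+/*
+ * H_PROTECT: update the pp, N and key bits of an existing HPTE and
+ * flush the stale translation from the TLB.
+ */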
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index, unsigned long avpn,
+ unsigned long va)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte;
+ unsigned long v, r, rb;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ while (!lock_hpte(hpte, HPTE_V_HVLOCK))
+ cpu_relax();
+ if ((hpte[0] & HPTE_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
+ hpte[0] &= ~HPTE_V_HVLOCK;
+ return H_NOT_FOUND;
+ }
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ flags |= H_LOCAL;
+ v = hpte[0];
+ r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
+ HPTE_R_KEY_HI | HPTE_R_KEY_LO);
+ r |= (flags << 55) & HPTE_R_PP0;
+ r |= (flags << 48) & HPTE_R_KEY_HI;
+ r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+ rb = compute_tlbie_rb(v, r, pte_index);
+ hpte[0] = v & ~HPTE_V_VALID;
+ if (!(flags & H_LOCAL)) {
+ while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
+ : : "r" (rb), "r" (kvm->arch.lpid));
+ asm volatile("ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ asm volatile("ptesync" : : : "memory");
+ asm volatile("tlbiel %0" : : "r" (rb));
+ asm volatile("ptesync" : : : "memory");
+ }
+ hpte[1] = r;
+ eieio();
+ hpte[0] = v & ~HPTE_V_HVLOCK;
+ asm volatile("ptesync" : : : "memory");
+ return H_SUCCESS;
+}
+
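+/*
+ * Convert a guest real address back to a guest logical address by
+ * searching the pginfo array for the matching page frame.
+ */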
+static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
+{
+ long int i;
+ unsigned long offset, rpn;
+
+ offset = realaddr & (kvm->arch.ram_psize - 1);
+ rpn = (realaddr - offset) >> PAGE_SHIFT;
+ for (i = 0; i < kvm->arch.ram_npages; ++i)
+ if (rpn == kvm->arch.ram_pginfo[i].pfn)
+ return (i << PAGE_SHIFT) + offset;
+ return HPTE_R_RPN; /* all 1s in the RPN field */
+}
+
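+/*
+ * H_READ: return the contents of up to 4 HPTEs in GPRs 4-11,
+ * optionally translating real addresses back to guest logical addresses.
+ */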
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long pte_index)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hpte, r;
+ int i, n = 1;
+
+ if (pte_index >= (HPT_NPTEG << 3))
+ return H_PARAMETER;
+ if (flags & H_READ_4) {
+ pte_index &= ~3;
+ n = 4;
+ }
+ for (i = 0; i < n; ++i, ++pte_index) {
+ hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ r = hpte[1];
+ if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
+ r = reverse_xlate(kvm, r & HPTE_R_RPN) |
+ (r & ~HPTE_R_RPN);
+ vcpu->arch.gpr[4 + i * 2] = hpte[0];
+ vcpu->arch.gpr[5 + i * 2] = r;
+ }
+ return H_SUCCESS;
+}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
new file mode 100644
index 000000000000..6dd33581a228
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -0,0 +1,1345 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * Derived from book3s_rmhandlers.S and other files, which are:
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+
+/*****************************************************************************
+ * *
+ * Real Mode handlers that need to be in the linear mapping *
+ * *
+ ****************************************************************************/
+
+ .globl kvmppc_skip_interrupt
+kvmppc_skip_interrupt:
+ mfspr r13,SPRN_SRR0
+ addi r13,r13,4
+ mtspr SPRN_SRR0,r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+ .globl kvmppc_skip_Hinterrupt
+kvmppc_skip_Hinterrupt:
+ mfspr r13,SPRN_HSRR0
+ addi r13,r13,4
+ mtspr SPRN_HSRR0,r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
+/*
+ * Call kvmppc_handler_trampoline_enter in real mode.
+ * Must be called with interrupts hard-disabled.
+ *
+ * Input Registers:
+ *
+ * LR = return address to continue at after eventually re-enabling MMU
+ */
+_GLOBAL(kvmppc_hv_entry_trampoline)
+ mfmsr r10
+ LOAD_REG_ADDR(r5, kvmppc_hv_entry)
+ li r0,MSR_RI
+ andc r0,r10,r0
+ li r6,MSR_IR | MSR_DR
+ andc r6,r10,r6
+ mtmsrd r0,1 /* clear RI in MSR */
+ mtsrr0 r5
+ mtsrr1 r6
+ RFI
+
+#define ULONG_SIZE 8
+#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
+#define XICS_XIRR 4
+#define XICS_QIRR 0xc
+
+/*
+ * We come in here when wakened from nap mode on a secondary hw thread.
+ * Relocation is off and most register values are lost.
+ * r13 points to the PACA.
+ */
+ .globl kvm_start_guest
+kvm_start_guest:
+ ld r1,PACAEMERGSP(r13)
+ subi r1,r1,STACK_FRAME_OVERHEAD
+
+ /* get vcpu pointer */
+ ld r4, HSTATE_KVM_VCPU(r13)
+
+ /* We got here with an IPI; clear it */
+ ld r5, HSTATE_XICS_PHYS(r13)
+ li r0, 0xff
+ li r6, XICS_QIRR
+ li r7, XICS_XIRR
+ lwzcix r8, r5, r7 /* ack the interrupt */
+ sync
+ stbcix r0, r5, r6 /* clear it */
+ stwcix r8, r5, r7 /* EOI it */
+
+.global kvmppc_hv_entry
+kvmppc_hv_entry:
+
+ /* Required state:
+ *
+ * R4 = vcpu pointer
+ * MSR = ~IR|DR
+ * R13 = PACA
+ * R1 = host R1
+ * all other volatile GPRS = free
+ */
+ mflr r0
+ std r0, HSTATE_VMHANDLER(r13)
+
+ ld r14, VCPU_GPR(r14)(r4)
+ ld r15, VCPU_GPR(r15)(r4)
+ ld r16, VCPU_GPR(r16)(r4)
+ ld r17, VCPU_GPR(r17)(r4)
+ ld r18, VCPU_GPR(r18)(r4)
+ ld r19, VCPU_GPR(r19)(r4)
+ ld r20, VCPU_GPR(r20)(r4)
+ ld r21, VCPU_GPR(r21)(r4)
+ ld r22, VCPU_GPR(r22)(r4)
+ ld r23, VCPU_GPR(r23)(r4)
+ ld r24, VCPU_GPR(r24)(r4)
+ ld r25, VCPU_GPR(r25)(r4)
+ ld r26, VCPU_GPR(r26)(r4)
+ ld r27, VCPU_GPR(r27)(r4)
+ ld r28, VCPU_GPR(r28)(r4)
+ ld r29, VCPU_GPR(r29)(r4)
+ ld r30, VCPU_GPR(r30)(r4)
+ ld r31, VCPU_GPR(r31)(r4)
+
+ /* Load guest PMU registers */
+ /* R4 is live here (vcpu pointer) */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+ lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
+ lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
+ lwz r6, VCPU_PMC + 8(r4)
+ lwz r7, VCPU_PMC + 12(r4)
+ lwz r8, VCPU_PMC + 16(r4)
+ lwz r9, VCPU_PMC + 20(r4)
+BEGIN_FTR_SECTION
+ lwz r10, VCPU_PMC + 24(r4)
+ lwz r11, VCPU_PMC + 28(r4)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r5
+ mtspr SPRN_PMC3, r6
+ mtspr SPRN_PMC4, r7
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, VCPU_MMCR(r4)
+ ld r5, VCPU_MMCR + 8(r4)
+ ld r6, VCPU_MMCR + 16(r4)
+ mtspr SPRN_MMCR1, r5
+ mtspr SPRN_MMCRA, r6
+ mtspr SPRN_MMCR0, r3
+ isync
+
+ /* Load up FP, VMX and VSX registers */
+ bl kvmppc_load_fp
+
+BEGIN_FTR_SECTION
+ /* Switch DSCR to guest value */
+ ld r5, VCPU_DSCR(r4)
+ mtspr SPRN_DSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /*
+ * Set the decrementer to the guest decrementer.
+ */
+ ld r8,VCPU_DEC_EXPIRES(r4)
+ mftb r7
+ subf r3,r7,r8
+ mtspr SPRN_DEC,r3
+ stw r3,VCPU_DEC(r4)
+
+ ld r5, VCPU_SPRG0(r4)
+ ld r6, VCPU_SPRG1(r4)
+ ld r7, VCPU_SPRG2(r4)
+ ld r8, VCPU_SPRG3(r4)
+ mtspr SPRN_SPRG0, r5
+ mtspr SPRN_SPRG1, r6
+ mtspr SPRN_SPRG2, r7
+ mtspr SPRN_SPRG3, r8
+
+ /* Save R1 in the PACA */
+ std r1, HSTATE_HOST_R1(r13)
+
+ /* Increment yield count if they have a VPA */
+ ld r3, VCPU_VPA(r4)
+ cmpdi r3, 0
+ beq 25f
+ lwz r5, LPPACA_YIELDCOUNT(r3)
+ addi r5, r5, 1
+ stw r5, LPPACA_YIELDCOUNT(r3)
+25:
+ /* Load up DAR and DSISR */
+ ld r5, VCPU_DAR(r4)
+ lwz r6, VCPU_DSISR(r4)
+ mtspr SPRN_DAR, r5
+ mtspr SPRN_DSISR, r6
+
+ /* Set partition DABR */
+ li r5,3
+ ld r6,VCPU_DABR(r4)
+ mtspr SPRN_DABRX,r5
+ mtspr SPRN_DABR,r6
+
+BEGIN_FTR_SECTION
+ /* Restore AMR and UAMOR, set AMOR to all 1s */
+ ld r5,VCPU_AMR(r4)
+ ld r6,VCPU_UAMOR(r4)
+ li r7,-1
+ mtspr SPRN_AMR,r5
+ mtspr SPRN_UAMOR,r6
+ mtspr SPRN_AMOR,r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Clear out SLB */
+ li r6,0
+ slbmte r6,r6
+ slbia
+ ptesync
+
+BEGIN_FTR_SECTION
+ b 30f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ /*
+ * POWER7 host -> guest partition switch code.
+ * We don't have to lock against concurrent tlbies,
+ * but we do have to coordinate across hardware threads.
+ */
+ /* Increment entry count iff exit count is zero. */
+ ld r5,HSTATE_KVM_VCORE(r13)
+ addi r9,r5,VCORE_ENTRY_EXIT
+21: lwarx r3,0,r9
+ cmpwi r3,0x100 /* any threads starting to exit? */
+ bge secondary_too_late /* if so we're too late to the party */
+ addi r3,r3,1
+ stwcx. r3,0,r9
+ bne 21b
+
+ /* Primary thread switches to guest partition. */
+ ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+ lwz r6,VCPU_PTID(r4)
+ cmpwi r6,0
+ bne 20f
+ ld r6,KVM_SDR1(r9)
+ lwz r7,KVM_LPID(r9)
+ li r0,LPID_RSVD /* switch to reserved LPID */
+ mtspr SPRN_LPID,r0
+ ptesync
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+ mtspr SPRN_LPID,r7
+ isync
+ li r0,1
+ stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
+ b 10f
+
+ /* Secondary threads wait for primary to have done partition switch */
+20: lbz r0,VCORE_IN_GUEST(r5)
+ cmpwi r0,0
+ beq 20b
+
+ /* Set LPCR. Set the MER bit if there is a pending external irq. */
+10: ld r8,KVM_LPCR(r9)
+ ld r0,VCPU_PENDING_EXC(r4)
+ li r7,(1 << BOOK3S_IRQPRIO_EXTERNAL)
+ oris r7,r7,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
+ and. r0,r0,r7
+ beq 11f
+ ori r8,r8,LPCR_MER
+11: mtspr SPRN_LPCR,r8
+ ld r8,KVM_RMOR(r9)
+ mtspr SPRN_RMOR,r8
+ isync
+
+ /* Check if HDEC expires soon */
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,10
+ li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ mr r9,r4
+ blt hdec_soon
+
+ /*
+ * Invalidate the TLB if we could possibly have stale TLB
+ * entries for this partition on this core due to the use
+ * of tlbiel.
+ * XXX maybe only need this on primary thread?
+ */
+ ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+ lwz r5,VCPU_VCPUID(r4)
+ lhz r6,PACAPACAINDEX(r13)
+ rldimi r6,r5,0,62 /* XXX map as if threads 1:1 p:v */
+ lhz r8,VCPU_LAST_CPU(r4)
+ sldi r7,r6,1 /* see if this is the same vcpu */
+ add r7,r7,r9 /* as last ran on this pcpu */
+ lhz r0,KVM_LAST_VCPU(r7)
+ cmpw r6,r8 /* on the same cpu core as last time? */
+ bne 3f
+ cmpw r0,r5 /* same vcpu as this core last ran? */
+ beq 1f
+3: sth r6,VCPU_LAST_CPU(r4) /* if not, invalidate partition TLB */
+ sth r5,KVM_LAST_VCPU(r7)
+ li r6,128
+ mtctr r6
+ li r7,0x800 /* IS field = 0b10 */
+ ptesync
+2: tlbiel r7
+ addi r7,r7,0x1000
+ bdnz 2b
+ ptesync
+1:
+
+ /* Save purr/spurr */
+ mfspr r5,SPRN_PURR
+ mfspr r6,SPRN_SPURR
+ std r5,HSTATE_PURR(r13)
+ std r6,HSTATE_SPURR(r13)
+ ld r7,VCPU_PURR(r4)
+ ld r8,VCPU_SPURR(r4)
+ mtspr SPRN_PURR,r7
+ mtspr SPRN_SPURR,r8
+ b 31f
+
+ /*
+ * PPC970 host -> guest partition switch code.
+ * We have to lock against concurrent tlbies,
+ * using native_tlbie_lock to lock against host tlbies
+ * and kvm->arch.tlbie_lock to lock against guest tlbies.
+ * We also have to invalidate the TLB since its
+ * entries aren't tagged with the LPID.
+ */
+30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */
+
+ /* first take native_tlbie_lock */
+ .section ".toc","aw"
+toc_tlbie_lock:
+ .tc native_tlbie_lock[TC],native_tlbie_lock
+ .previous
+ ld r3,toc_tlbie_lock@toc(2)
+ lwz r8,PACA_LOCK_TOKEN(r13)
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */
+ li r0,0x18f
+ rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
+ or r0,r7,r0
+ ptesync
+ sync
+ mtspr SPRN_HID4,r0 /* switch to reserved LPID */
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop native_tlbie_lock */
+
+ /* invalidate the whole TLB */
+ li r0,256
+ mtctr r0
+ li r6,0
+25: tlbiel r6
+ addi r6,r6,0x1000
+ bdnz 25b
+ ptesync
+
+ /* Take the guest's tlbie_lock */
+ addi r3,r9,KVM_TLBIE_LOCK
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+ ld r6,KVM_SDR1(r9)
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+
+ /* Set up HID4 with the guest's LPID etc. */
+ sync
+ mtspr SPRN_HID4,r7
+ isync
+
+ /* drop the guest's tlbie_lock */
+ li r0,0
+ stw r0,0(r3)
+
+ /* Check if HDEC expires soon */
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,10
+ li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ mr r9,r4
+ blt hdec_soon
+
+ /* Enable HDEC interrupts */
+ mfspr r0,SPRN_HID0
+ li r3,1
+ rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+ sync
+ mtspr SPRN_HID0,r0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+
+ /* Load up guest SLB entries */
+31: lwz r5,VCPU_SLB_MAX(r4)
+ cmpwi r5,0
+ beq 9f
+ mtctr r5
+ addi r6,r4,VCPU_SLB
+1: ld r8,VCPU_SLB_E(r6)
+ ld r9,VCPU_SLB_V(r6)
+ slbmte r9,r8
+ addi r6,r6,VCPU_SLB_SIZE
+ bdnz 1b
+9:
+
+ /* Restore state of CTRL run bit; assume 1 on entry */
+ lwz r5,VCPU_CTRL(r4)
+ andi. r5,r5,1
+ bne 4f
+ mfspr r6,SPRN_CTRLF
+ clrrdi r6,r6,1
+ mtspr SPRN_CTRLT,r6
+4:
+ ld r6, VCPU_CTR(r4)
+ lwz r7, VCPU_XER(r4)
+
+ mtctr r6
+ mtxer r7
+
+ /* Move SRR0 and SRR1 into the respective regs */
+ ld r6, VCPU_SRR0(r4)
+ ld r7, VCPU_SRR1(r4)
+ mtspr SPRN_SRR0, r6
+ mtspr SPRN_SRR1, r7
+
+ ld r10, VCPU_PC(r4)
+
+ ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */
+ rldicl r11, r11, 63 - MSR_HV_LG, 1
+ rotldi r11, r11, 1 + MSR_HV_LG
+ ori r11, r11, MSR_ME
+
+fast_guest_return:
+ mtspr SPRN_HSRR0,r10
+ mtspr SPRN_HSRR1,r11
+
+ /* Activate guest mode, so faults get handled by KVM */
+ li r9, KVM_GUEST_MODE_GUEST
+ stb r9, HSTATE_IN_GUEST(r13)
+
+ /* Enter guest */
+
+ ld r5, VCPU_LR(r4)
+ lwz r6, VCPU_CR(r4)
+ mtlr r5
+ mtcr r6
+
+ ld r0, VCPU_GPR(r0)(r4)
+ ld r1, VCPU_GPR(r1)(r4)
+ ld r2, VCPU_GPR(r2)(r4)
+ ld r3, VCPU_GPR(r3)(r4)
+ ld r5, VCPU_GPR(r5)(r4)
+ ld r6, VCPU_GPR(r6)(r4)
+ ld r7, VCPU_GPR(r7)(r4)
+ ld r8, VCPU_GPR(r8)(r4)
+ ld r9, VCPU_GPR(r9)(r4)
+ ld r10, VCPU_GPR(r10)(r4)
+ ld r11, VCPU_GPR(r11)(r4)
+ ld r12, VCPU_GPR(r12)(r4)
+ ld r13, VCPU_GPR(r13)(r4)
+
+ ld r4, VCPU_GPR(r4)(r4)
+
+ hrfid
+ b .
+
+/******************************************************************************
+ * *
+ * Exit code *
+ * *
+ *****************************************************************************/
+
+/*
+ * We come here from the first-level interrupt handlers.
+ */
+ .globl kvmppc_interrupt
+kvmppc_interrupt:
+ /*
+ * Register contents:
+ * R12 = interrupt vector
+ * R13 = PACA
+ * guest CR, R12 saved in shadow VCPU SCRATCH1/0
+ * guest R13 saved in SPRN_SCRATCH0
+ */
+ /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
+ std r9, HSTATE_HOST_R2(r13)
+ ld r9, HSTATE_KVM_VCPU(r13)
+
+ /* Save registers */
+
+ std r0, VCPU_GPR(r0)(r9)
+ std r1, VCPU_GPR(r1)(r9)
+ std r2, VCPU_GPR(r2)(r9)
+ std r3, VCPU_GPR(r3)(r9)
+ std r4, VCPU_GPR(r4)(r9)
+ std r5, VCPU_GPR(r5)(r9)
+ std r6, VCPU_GPR(r6)(r9)
+ std r7, VCPU_GPR(r7)(r9)
+ std r8, VCPU_GPR(r8)(r9)
+ ld r0, HSTATE_HOST_R2(r13)
+ std r0, VCPU_GPR(r9)(r9)
+ std r10, VCPU_GPR(r10)(r9)
+ std r11, VCPU_GPR(r11)(r9)
+ ld r3, HSTATE_SCRATCH0(r13)
+ lwz r4, HSTATE_SCRATCH1(r13)
+ std r3, VCPU_GPR(r12)(r9)
+ stw r4, VCPU_CR(r9)
+
+ /* Restore R1/R2 so we can handle faults */
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATOC(r13)
+
+ mfspr r10, SPRN_SRR0
+ mfspr r11, SPRN_SRR1
+ std r10, VCPU_SRR0(r9)
+ std r11, VCPU_SRR1(r9)
+ andi. r0, r12, 2 /* need to read HSRR0/1? */
+ beq 1f
+ mfspr r10, SPRN_HSRR0
+ mfspr r11, SPRN_HSRR1
+ clrrdi r12, r12, 2
+1: std r10, VCPU_PC(r9)
+ std r11, VCPU_MSR(r9)
+
+ GET_SCRATCH0(r3)
+ mflr r4
+ std r3, VCPU_GPR(r13)(r9)
+ std r4, VCPU_LR(r9)
+
+ /* Unset guest mode */
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+
+ stw r12,VCPU_TRAP(r9)
+
+ /* See if this is a leftover HDEC interrupt */
+ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ bne 2f
+ mfspr r3,SPRN_HDEC
+ cmpwi r3,0
+ bge ignore_hdec
+2:
+ /* See if this is something we can handle in real mode */
+ cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
+ beq hcall_try_real_mode
+hcall_real_cont:
+
+ /* Check for mediated interrupts (could be done earlier really ...) */
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_EXTERNAL
+ bne+ 1f
+ ld r5,VCPU_KVM(r9)
+ ld r5,KVM_LPCR(r5)
+ andi. r0,r11,MSR_EE
+ beq 1f
+ andi. r0,r5,LPCR_MER
+ bne bounce_ext_interrupt
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save DEC */
+ mfspr r5,SPRN_DEC
+ mftb r6
+ extsw r5,r5
+ add r5,r5,r6
+ std r5,VCPU_DEC_EXPIRES(r9)
+
+ /* Save HEIR (HV emulation assist reg) in last_inst
+ if this is an HEI (HV emulation interrupt, e40) */
+ li r3,-1
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
+ bne 11f
+ mfspr r3,SPRN_HEIR
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+11: stw r3,VCPU_LAST_INST(r9)
+
+ /* Save more register state */
+ mfxer r5
+ mfdar r6
+ mfdsisr r7
+ mfctr r8
+
+ stw r5, VCPU_XER(r9)
+ std r6, VCPU_DAR(r9)
+ stw r7, VCPU_DSISR(r9)
+ std r8, VCPU_CTR(r9)
+ /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
+BEGIN_FTR_SECTION
+ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
+ beq 6f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+7: std r6, VCPU_FAULT_DAR(r9)
+ stw r7, VCPU_FAULT_DSISR(r9)
+
+ /* Save guest CTRL register, set runlatch to 1 */
+ mfspr r6,SPRN_CTRLF
+ stw r6,VCPU_CTRL(r9)
+ andi. r0,r6,1
+ bne 4f
+ ori r6,r6,1
+ mtspr SPRN_CTRLT,r6
+4:
+ /* Read the guest SLB and save it away */
+ lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
+ mtctr r0
+ li r6,0
+ addi r7,r9,VCPU_SLB
+ li r5,0
+1: slbmfee r8,r6
+ andis. r0,r8,SLB_ESID_V@h
+ beq 2f
+ add r8,r8,r6 /* put index in */
+ slbmfev r3,r6
+ std r8,VCPU_SLB_E(r7)
+ std r3,VCPU_SLB_V(r7)
+ addi r7,r7,VCPU_SLB_SIZE
+ addi r5,r5,1
+2: addi r6,r6,1
+ bdnz 1b
+ stw r5,VCPU_SLB_MAX(r9)
+
+ /*
+ * Save the guest PURR/SPURR
+ */
+BEGIN_FTR_SECTION
+ mfspr r5,SPRN_PURR
+ mfspr r6,SPRN_SPURR
+ ld r7,VCPU_PURR(r9)
+ ld r8,VCPU_SPURR(r9)
+ std r5,VCPU_PURR(r9)
+ std r6,VCPU_SPURR(r9)
+ subf r5,r7,r5
+ subf r6,r8,r6
+
+ /*
+ * Restore host PURR/SPURR and add guest times
+ * so that the time in the guest gets accounted.
+ */
+ ld r3,HSTATE_PURR(r13)
+ ld r4,HSTATE_SPURR(r13)
+ add r3,r3,r5
+ add r4,r4,r6
+ mtspr SPRN_PURR,r3
+ mtspr SPRN_SPURR,r4
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
+
+ /* Clear out SLB */
+ li r5,0
+ slbmte r5,r5
+ slbia
+ ptesync
+
+hdec_soon:
+BEGIN_FTR_SECTION
+ b 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ /*
+ * POWER7 guest -> host partition switch code.
+ * We don't have to lock against tlbies but we do
+ * have to coordinate the hardware threads.
+ */
+ /* Increment the threads-exiting-guest count in the 0xff00
+ bits of vcore->entry_exit_count */
+ lwsync
+ ld r5,HSTATE_KVM_VCORE(r13)
+ addi r6,r5,VCORE_ENTRY_EXIT
+41: lwarx r3,0,r6
+ addi r0,r3,0x100
+ stwcx. r0,0,r6
+ bne 41b
+
+ /*
+ * At this point we have an interrupt that we have to pass
+ * up to the kernel or qemu; we can't handle it in real mode.
+ * Thus we have to do a partition switch, so we have to
+ * collect the other threads, if we are the first thread
+ * to take an interrupt. To do this, we set the HDEC to 0,
+ * which causes an HDEC interrupt in all threads within 2ns
+ * because the HDEC register is shared between all 4 threads.
+ * However, we don't need to bother if this is an HDEC
+ * interrupt, since the other threads will already be on their
+ * way here in that case.
+ */
+ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
+ beq 40f
+ cmpwi r3,0x100 /* Are we the first here? */
+ bge 40f
+ cmpwi r3,1
+ ble 40f
+ li r0,0
+ mtspr SPRN_HDEC,r0
+40:
+
+ /* Secondary threads wait for primary to do partition switch */
+ ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
+ ld r5,HSTATE_KVM_VCORE(r13)
+ lwz r3,VCPU_PTID(r9)
+ cmpwi r3,0
+ beq 15f
+ HMT_LOW
+13: lbz r3,VCORE_IN_GUEST(r5)
+ cmpwi r3,0
+ bne 13b
+ HMT_MEDIUM
+ b 16f
+
+ /* Primary thread waits for all the secondaries to exit guest */
+15: lwz r3,VCORE_ENTRY_EXIT(r5)
+ srwi r0,r3,8
+ clrldi r3,r3,56
+ cmpw r3,r0
+ bne 15b
+ isync
+
+ /* Primary thread switches back to host partition */
+ ld r6,KVM_HOST_SDR1(r4)
+ lwz r7,KVM_HOST_LPID(r4)
+ li r8,LPID_RSVD /* switch to reserved LPID */
+ mtspr SPRN_LPID,r8
+ ptesync
+ mtspr SPRN_SDR1,r6 /* switch to partition page table */
+ mtspr SPRN_LPID,r7
+ isync
+ li r0,0
+ stb r0,VCORE_IN_GUEST(r5)
+ lis r8,0x7fff /* MAX_INT@h */
+ mtspr SPRN_HDEC,r8
+
+16: ld r8,KVM_HOST_LPCR(r4)
+ mtspr SPRN_LPCR,r8
+ isync
+ b 33f
+
+ /*
+ * PPC970 guest -> host partition switch code.
+ * We have to lock against concurrent tlbies, and
+ * we have to flush the whole TLB.
+ */
+32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
+
+ /* Take the guest's tlbie_lock */
+ lwz r8,PACA_LOCK_TOKEN(r13)
+ addi r3,r4,KVM_TLBIE_LOCK
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
+ li r0,0x18f
+ rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
+ or r0,r7,r0
+ ptesync
+ sync
+ mtspr SPRN_HID4,r0 /* switch to reserved LPID */
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop guest tlbie_lock */
+
+ /* invalidate the whole TLB */
+ li r0,256
+ mtctr r0
+ li r6,0
+25: tlbiel r6
+ addi r6,r6,0x1000
+ bdnz 25b
+ ptesync
+
+ /* take native_tlbie_lock */
+ ld r3,toc_tlbie_lock@toc(2)
+24: lwarx r0,0,r3
+ cmpwi r0,0
+ bne 24b
+ stwcx. r8,0,r3
+ bne 24b
+ isync
+
+ ld r6,KVM_HOST_SDR1(r4)
+ mtspr SPRN_SDR1,r6 /* switch to host page table */
+
+ /* Set up host HID4 value */
+ sync
+ mtspr SPRN_HID4,r7
+ isync
+ li r0,0
+ stw r0,0(r3) /* drop native_tlbie_lock */
+
+ lis r8,0x7fff /* MAX_INT@h */
+ mtspr SPRN_HDEC,r8
+
+ /* Disable HDEC interrupts */
+ mfspr r0,SPRN_HID0
+ li r3,0
+ rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
+ sync
+ mtspr SPRN_HID0,r0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+ mfspr r0,SPRN_HID0
+
+ /* load host SLB entries */
+33: ld r8,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ ld r5,SLBSHADOW_SAVEAREA(r8)
+ ld r6,SLBSHADOW_SAVEAREA+8(r8)
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r8,r8,16
+ .endr
+
+ /* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
+ mfspr r5,SPRN_AMR
+ mfspr r6,SPRN_UAMOR
+ std r5,VCPU_AMR(r9)
+ std r6,VCPU_UAMOR(r9)
+ li r6,0
+ mtspr SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Restore host DABR and DABRX */
+ ld r5,HSTATE_DABR(r13)
+ li r6,7
+ mtspr SPRN_DABR,r5
+ mtspr SPRN_DABRX,r6
+
+ /* Switch DSCR back to host value */
+BEGIN_FTR_SECTION
+ mfspr r8, SPRN_DSCR
+ ld r7, HSTATE_DSCR(r13)
+ std r8, VCPU_DSCR(r7)
+ mtspr SPRN_DSCR, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /* Save non-volatile GPRs */
+ std r14, VCPU_GPR(r14)(r9)
+ std r15, VCPU_GPR(r15)(r9)
+ std r16, VCPU_GPR(r16)(r9)
+ std r17, VCPU_GPR(r17)(r9)
+ std r18, VCPU_GPR(r18)(r9)
+ std r19, VCPU_GPR(r19)(r9)
+ std r20, VCPU_GPR(r20)(r9)
+ std r21, VCPU_GPR(r21)(r9)
+ std r22, VCPU_GPR(r22)(r9)
+ std r23, VCPU_GPR(r23)(r9)
+ std r24, VCPU_GPR(r24)(r9)
+ std r25, VCPU_GPR(r25)(r9)
+ std r26, VCPU_GPR(r26)(r9)
+ std r27, VCPU_GPR(r27)(r9)
+ std r28, VCPU_GPR(r28)(r9)
+ std r29, VCPU_GPR(r29)(r9)
+ std r30, VCPU_GPR(r30)(r9)
+ std r31, VCPU_GPR(r31)(r9)
+
+ /* Save SPRGs */
+ mfspr r3, SPRN_SPRG0
+ mfspr r4, SPRN_SPRG1
+ mfspr r5, SPRN_SPRG2
+ mfspr r6, SPRN_SPRG3
+ std r3, VCPU_SPRG0(r9)
+ std r4, VCPU_SPRG1(r9)
+ std r5, VCPU_SPRG2(r9)
+ std r6, VCPU_SPRG3(r9)
+
+ /* Increment yield count if they have a VPA */
+ ld r8, VCPU_VPA(r9) /* do they have a VPA? */
+ cmpdi r8, 0
+ beq 25f
+ lwz r3, LPPACA_YIELDCOUNT(r8)
+ addi r3, r3, 1
+ stw r3, LPPACA_YIELDCOUNT(r8)
+25:
+ /* Save PMU registers if requested */
+ /* r8 and cr0.eq are live here */
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r4, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+ beq 21f /* if no VPA, save PMU stuff anyway */
+ lbz r7, LPPACA_PMCINUSE(r8)
+ cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
+ bne 21f
+ std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
+ b 22f
+21: mfspr r5, SPRN_MMCR1
+ mfspr r6, SPRN_MMCRA
+ std r4, VCPU_MMCR(r9)
+ std r5, VCPU_MMCR + 8(r9)
+ std r6, VCPU_MMCR + 16(r9)
+ mfspr r3, SPRN_PMC1
+ mfspr r4, SPRN_PMC2
+ mfspr r5, SPRN_PMC3
+ mfspr r6, SPRN_PMC4
+ mfspr r7, SPRN_PMC5
+ mfspr r8, SPRN_PMC6
+BEGIN_FTR_SECTION
+ mfspr r10, SPRN_PMC7
+ mfspr r11, SPRN_PMC8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ stw r3, VCPU_PMC(r9)
+ stw r4, VCPU_PMC + 4(r9)
+ stw r5, VCPU_PMC + 8(r9)
+ stw r6, VCPU_PMC + 12(r9)
+ stw r7, VCPU_PMC + 16(r9)
+ stw r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+ stw r10, VCPU_PMC + 24(r9)
+ stw r11, VCPU_PMC + 28(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+22:
+ /* save FP state */
+ mr r3, r9
+ bl .kvmppc_save_fp
+
+ /* Secondary threads go off to take a nap on POWER7 */
+BEGIN_FTR_SECTION
+ lwz r0,VCPU_PTID(r3)
+ cmpwi r0,0
+ bne secondary_nap
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+ /*
+ * Reload DEC. HDEC interrupts were disabled when
+ * we reloaded the host's LPCR value.
+ */
+ ld r3, HSTATE_DECEXP(r13)
+ mftb r4
+ subf r4, r4, r3
+ mtspr SPRN_DEC, r4
+
+ /* Reload the host's PMU registers */
+ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
+ lbz r4, LPPACA_PMCINUSE(r3)
+ cmpwi r4, 0
+ beq 23f /* skip if not */
+ lwz r3, HSTATE_PMC(r13)
+ lwz r4, HSTATE_PMC + 4(r13)
+ lwz r5, HSTATE_PMC + 8(r13)
+ lwz r6, HSTATE_PMC + 12(r13)
+ lwz r8, HSTATE_PMC + 16(r13)
+ lwz r9, HSTATE_PMC + 20(r13)
+BEGIN_FTR_SECTION
+ lwz r10, HSTATE_PMC + 24(r13)
+ lwz r11, HSTATE_PMC + 28(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r4
+ mtspr SPRN_PMC3, r5
+ mtspr SPRN_PMC4, r6
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+BEGIN_FTR_SECTION
+ mtspr SPRN_PMC7, r10
+ mtspr SPRN_PMC8, r11
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ ld r3, HSTATE_MMCR(r13)
+ ld r4, HSTATE_MMCR + 8(r13)
+ ld r5, HSTATE_MMCR + 16(r13)
+ mtspr SPRN_MMCR1, r4
+ mtspr SPRN_MMCRA, r5
+ mtspr SPRN_MMCR0, r3
+ isync
+23:
+ /*
+ * For external and machine check interrupts, we need
+ * to call the Linux handler to process the interrupt.
+ * We do that by jumping to the interrupt vector address
+ * which we have in r12. The [h]rfid at the end of the
+ * handler will return to the book3s_hv_interrupts.S code.
+ * For other interrupts we do the rfid to get back
+ * to the book3s_interrupts.S code here.
+ */
+ ld r8, HSTATE_VMHANDLER(r13)
+ ld r7, HSTATE_HOST_MSR(r13)
+
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+ beq 11f
+ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+
+ /* RFI into the highmem handler, or branch to interrupt handler */
+12: mfmsr r6
+ mtctr r12
+ li r0, MSR_RI
+ andc r6, r6, r0
+ mtmsrd r6, 1 /* Clear RI in MSR */
+ mtsrr0 r8
+ mtsrr1 r7
+ beqctr
+ RFI
+
+11:
+BEGIN_FTR_SECTION
+ b 12b
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ mtspr SPRN_HSRR0, r8
+ mtspr SPRN_HSRR1, r7
+ ba 0x500
+
+6: mfspr r6,SPRN_HDAR
+ mfspr r7,SPRN_HDSISR
+ b 7b
+
+/*
+ * Try to handle an hcall in real mode.
+ * Returns to the guest if we handle it, or continues on up to
+ * the kernel if we can't (i.e. if we don't have a handler for
+ * it, or if the handler returns H_TOO_HARD).
+ */
+ .globl hcall_try_real_mode
+hcall_try_real_mode:
+ ld r3,VCPU_GPR(r3)(r9)
+ andi. r0,r11,MSR_PR
+ bne hcall_real_cont
+ clrrdi r3,r3,2
+ cmpldi r3,hcall_real_table_end - hcall_real_table
+ bge hcall_real_cont
+ LOAD_REG_ADDR(r4, hcall_real_table)
+ lwzx r3,r3,r4
+ cmpwi r3,0
+ beq hcall_real_cont
+ add r3,r3,r4
+ mtctr r3
+ mr r3,r9 /* get vcpu pointer */
+ ld r4,VCPU_GPR(r4)(r9)
+ bctrl
+ cmpdi r3,H_TOO_HARD
+ beq hcall_real_fallback
+ ld r4,HSTATE_KVM_VCPU(r13)
+ std r3,VCPU_GPR(r3)(r4)
+ ld r10,VCPU_PC(r4)
+ ld r11,VCPU_MSR(r4)
+ b fast_guest_return
+
+ /* We've attempted a real mode hcall, but it's punted it back
+ * to userspace. We need to restore some clobbered volatiles
+ * before resuming the pass-it-to-qemu path */
+hcall_real_fallback:
+ li r12,BOOK3S_INTERRUPT_SYSCALL
+ ld r9, HSTATE_KVM_VCPU(r13)
+ ld r11, VCPU_MSR(r9)
+
+ b hcall_real_cont
+
+ .globl hcall_real_table
+hcall_real_table:
+ .long 0 /* 0 - unused */
+ .long .kvmppc_h_remove - hcall_real_table
+ .long .kvmppc_h_enter - hcall_real_table
+ .long .kvmppc_h_read - hcall_real_table
+ .long 0 /* 0x10 - H_CLEAR_MOD */
+ .long 0 /* 0x14 - H_CLEAR_REF */
+ .long .kvmppc_h_protect - hcall_real_table
+ .long 0 /* 0x1c - H_GET_TCE */
+ .long .kvmppc_h_put_tce - hcall_real_table
+ .long 0 /* 0x24 - H_SET_SPRG0 */
+ .long .kvmppc_h_set_dabr - hcall_real_table
+ .long 0 /* 0x2c */
+ .long 0 /* 0x30 */
+ .long 0 /* 0x34 */
+ .long 0 /* 0x38 */
+ .long 0 /* 0x3c */
+ .long 0 /* 0x40 */
+ .long 0 /* 0x44 */
+ .long 0 /* 0x48 */
+ .long 0 /* 0x4c */
+ .long 0 /* 0x50 */
+ .long 0 /* 0x54 */
+ .long 0 /* 0x58 */
+ .long 0 /* 0x5c */
+ .long 0 /* 0x60 */
+ .long 0 /* 0x64 */
+ .long 0 /* 0x68 */
+ .long 0 /* 0x6c */
+ .long 0 /* 0x70 */
+ .long 0 /* 0x74 */
+ .long 0 /* 0x78 */
+ .long 0 /* 0x7c */
+ .long 0 /* 0x80 */
+ .long 0 /* 0x84 */
+ .long 0 /* 0x88 */
+ .long 0 /* 0x8c */
+ .long 0 /* 0x90 */
+ .long 0 /* 0x94 */
+ .long 0 /* 0x98 */
+ .long 0 /* 0x9c */
+ .long 0 /* 0xa0 */
+ .long 0 /* 0xa4 */
+ .long 0 /* 0xa8 */
+ .long 0 /* 0xac */
+ .long 0 /* 0xb0 */
+ .long 0 /* 0xb4 */
+ .long 0 /* 0xb8 */
+ .long 0 /* 0xbc */
+ .long 0 /* 0xc0 */
+ .long 0 /* 0xc4 */
+ .long 0 /* 0xc8 */
+ .long 0 /* 0xcc */
+ .long 0 /* 0xd0 */
+ .long 0 /* 0xd4 */
+ .long 0 /* 0xd8 */
+ .long 0 /* 0xdc */
+ .long 0 /* 0xe0 */
+ .long 0 /* 0xe4 */
+ .long 0 /* 0xe8 */
+ .long 0 /* 0xec */
+ .long 0 /* 0xf0 */
+ .long 0 /* 0xf4 */
+ .long 0 /* 0xf8 */
+ .long 0 /* 0xfc */
+ .long 0 /* 0x100 */
+ .long 0 /* 0x104 */
+ .long 0 /* 0x108 */
+ .long 0 /* 0x10c */
+ .long 0 /* 0x110 */
+ .long 0 /* 0x114 */
+ .long 0 /* 0x118 */
+ .long 0 /* 0x11c */
+ .long 0 /* 0x120 */
+ .long .kvmppc_h_bulk_remove - hcall_real_table
+hcall_real_table_end:
+
+ignore_hdec:
+ mr r4,r9
+ b fast_guest_return
+
+bounce_ext_interrupt:
+ mr r4,r9
+ mtspr SPRN_SRR0,r10
+ mtspr SPRN_SRR1,r11
+ li r10,BOOK3S_INTERRUPT_EXTERNAL
+ LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
+ b fast_guest_return
+
+_GLOBAL(kvmppc_h_set_dabr)
+ std r4,VCPU_DABR(r3)
+ mtspr SPRN_DABR,r4
+ li r3,0
+ blr
+
+secondary_too_late:
+ ld r5,HSTATE_KVM_VCORE(r13)
+ HMT_LOW
+13: lbz r3,VCORE_IN_GUEST(r5)
+ cmpwi r3,0
+ bne 13b
+ HMT_MEDIUM
+ ld r11,PACA_SLBSHADOWPTR(r13)
+
+ .rept SLB_NUM_BOLTED
+ ld r5,SLBSHADOW_SAVEAREA(r11)
+ ld r6,SLBSHADOW_SAVEAREA+8(r11)
+ andis. r7,r5,SLB_ESID_V@h
+ beq 1f
+ slbmte r6,r5
+1: addi r11,r11,16
+ .endr
+ b 50f
+
+secondary_nap:
+ /* Clear any pending IPI */
+50: ld r5, HSTATE_XICS_PHYS(r13)
+ li r0, 0xff
+ li r6, XICS_QIRR
+ stbcix r0, r5, r6
+
+ /* increment the nap count and then go to nap mode */
+ ld r4, HSTATE_KVM_VCORE(r13)
+ addi r4, r4, VCORE_NAP_COUNT
+ lwsync /* make previous updates visible */
+51: lwarx r3, 0, r4
+ addi r3, r3, 1
+ stwcx. r3, 0, r4
+ bne 51b
+ isync
+
+ mfspr r4, SPRN_LPCR
+ li r0, LPCR_PECE
+ andc r4, r4, r0
+ ori r4, r4, LPCR_PECE0 /* exit nap on interrupt */
+ mtspr SPRN_LPCR, r4
+ li r0, 0
+ std r0, HSTATE_SCRATCH0(r13)
+ ptesync
+ ld r0, HSTATE_SCRATCH0(r13)
+1: cmpd r0, r0
+ bne 1b
+ nap
+ b .
+
+/*
+ * Save away FP, VMX and VSX registers.
+ * r3 = vcpu pointer
+ */
+_GLOBAL(kvmppc_save_fp)
+ mfmsr r9
+ ori r8,r9,MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VEC@h
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ mtmsrd r8
+ isync
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r6,reg*16+VCPU_VSRS
+ stxvd2x reg,r6,r3
+ reg = reg + 1
+ .endr
+FTR_SECTION_ELSE
+#endif
+ reg = 0
+ .rept 32
+ stfd reg,reg*8+VCPU_FPRS(r3)
+ reg = reg + 1
+ .endr
+#ifdef CONFIG_VSX
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#endif
+ mffs fr0
+ stfd fr0,VCPU_FPSCR(r3)
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r6,reg*16+VCPU_VRS
+ stvx reg,r6,r3
+ reg = reg + 1
+ .endr
+ mfvscr vr0
+ li r6,VCPU_VSCR
+ stvx vr0,r6,r3
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ mfspr r6,SPRN_VRSAVE
+ stw r6,VCPU_VRSAVE(r3)
+ mtmsrd r9
+ isync
+ blr
+
+/*
+ * Load up FP, VMX and VSX registers
+ * r4 = vcpu pointer
+ */
+ .globl kvmppc_load_fp
+kvmppc_load_fp:
+ mfmsr r9
+ ori r8,r9,MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VEC@h
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ oris r8,r8,MSR_VSX@h
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ mtmsrd r8
+ isync
+ lfd fr0,VCPU_FPSCR(r4)
+ MTFSF_L(fr0)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ reg = 0
+ .rept 32
+ li r7,reg*16+VCPU_VSRS
+ lxvd2x reg,r7,r4
+ reg = reg + 1
+ .endr
+FTR_SECTION_ELSE
+#endif
+ reg = 0
+ .rept 32
+ lfd reg,reg*8+VCPU_FPRS(r4)
+ reg = reg + 1
+ .endr
+#ifdef CONFIG_VSX
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#endif
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ li r7,VCPU_VSCR
+ lvx vr0,r7,r4
+ mtvscr vr0
+ reg = 0
+ .rept 32
+ li r7,reg*16+VCPU_VRS
+ lvx reg,r7,r4
+ reg = reg + 1
+ .endr
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ lwz r7,VCPU_VRSAVE(r4)
+ mtspr SPRN_VRSAVE,r7
+ blr
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 2f0bc928b08a..c54b0e30cf3f 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -29,8 +29,7 @@
#define ULONG_SIZE 8
#define FUNC(name) GLUE(.,name)
-#define GET_SHADOW_VCPU(reg) \
- addi reg, r13, PACA_KVM_SVCPU
+#define GET_SHADOW_VCPU_R13
#define DISABLE_INTERRUPTS \
mfmsr r0; \
@@ -43,8 +42,8 @@
#define ULONG_SIZE 4
#define FUNC(name) name
-#define GET_SHADOW_VCPU(reg) \
- lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
+#define GET_SHADOW_VCPU_R13 \
+ lwz r13, (THREAD + THREAD_KVM_SVCPU)(r2)
#define DISABLE_INTERRUPTS \
mfmsr r0; \
@@ -85,7 +84,7 @@
* r3: kvm_run pointer
* r4: vcpu pointer
*/
-_GLOBAL(__kvmppc_vcpu_entry)
+_GLOBAL(__kvmppc_vcpu_run)
kvm_start_entry:
/* Write correct stack frame */
@@ -107,17 +106,11 @@ kvm_start_entry:
/* Load non-volatile guest state from the vcpu */
VCPU_LOAD_NVGPRS(r4)
- GET_SHADOW_VCPU(r5)
-
- /* Save R1/R2 in the PACA */
- PPC_STL r1, SVCPU_HOST_R1(r5)
- PPC_STL r2, SVCPU_HOST_R2(r5)
+kvm_start_lightweight:
- /* XXX swap in/out on load? */
+ GET_SHADOW_VCPU_R13
PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
- PPC_STL r3, SVCPU_VMHANDLER(r5)
-
-kvm_start_lightweight:
+ PPC_STL r3, HSTATE_VMHANDLER(r13)
PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 79751d8dd131..41cb0017e757 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -21,7 +21,6 @@
#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
-#include "trace.h"
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -29,6 +28,8 @@
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
+#include "trace.h"
+
#define PTE_SIZE 12
static struct kmem_cache *hpte_cache;
@@ -58,30 +59,31 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
u64 index;
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
trace_kvm_book3s_mmu_map(pte);
- spin_lock(&vcpu->arch.mmu_lock);
+ spin_lock(&vcpu3s->mmu_lock);
/* Add to ePTE list */
index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
- hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+ hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);
/* Add to ePTE_long list */
index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
hlist_add_head_rcu(&pte->list_pte_long,
- &vcpu->arch.hpte_hash_pte_long[index]);
+ &vcpu3s->hpte_hash_pte_long[index]);
/* Add to vPTE list */
index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
- hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+ hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);
/* Add to vPTE_long list */
index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
hlist_add_head_rcu(&pte->list_vpte_long,
- &vcpu->arch.hpte_hash_vpte_long[index]);
+ &vcpu3s->hpte_hash_vpte_long[index]);
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
}
static void free_pte_rcu(struct rcu_head *head)
@@ -92,16 +94,18 @@ static void free_pte_rcu(struct rcu_head *head)
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
trace_kvm_book3s_mmu_invalidate(pte);
/* Different for 32 and 64 bit */
kvmppc_mmu_invalidate_pte(vcpu, pte);
- spin_lock(&vcpu->arch.mmu_lock);
+ spin_lock(&vcpu3s->mmu_lock);
/* pte already invalidated in between? */
if (hlist_unhashed(&pte->list_pte)) {
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
return;
}
@@ -115,14 +119,15 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
else
kvm_release_pfn_clean(pte->pfn);
- spin_unlock(&vcpu->arch.mmu_lock);
+ spin_unlock(&vcpu3s->mmu_lock);
- vcpu->arch.hpte_cache_count--;
+ vcpu3s->hpte_cache_count--;
call_rcu(&pte->rcu_head, free_pte_rcu);
}
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
struct hlist_node *node;
int i;
@@ -130,7 +135,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
rcu_read_lock();
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
- struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+ struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
invalidate_pte(vcpu, pte);
@@ -141,12 +146,13 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
- list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
+ list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
rcu_read_lock();
@@ -160,12 +166,13 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
/* Find the list of entries in the map */
- list = &vcpu->arch.hpte_hash_pte_long[
+ list = &vcpu3s->hpte_hash_pte_long[
kvmppc_mmu_hash_pte_long(guest_ea)];
rcu_read_lock();
@@ -203,12 +210,13 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xfffffffffULL;
- list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
+ list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
rcu_read_lock();
@@ -223,12 +231,13 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_head *list;
struct hlist_node *node;
struct hpte_cache *pte;
u64 vp_mask = 0xffffff000ULL;
- list = &vcpu->arch.hpte_hash_vpte_long[
+ list = &vcpu3s->hpte_hash_vpte_long[
kvmppc_mmu_hash_vpte_long(guest_vp)];
rcu_read_lock();
@@ -261,6 +270,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hlist_node *node;
struct hpte_cache *pte;
int i;
@@ -270,7 +280,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
rcu_read_lock();
for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
- struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
+ struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
if ((pte->pte.raddr >= pa_start) &&
@@ -283,12 +293,13 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
struct hpte_cache *pte;
pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
- vcpu->arch.hpte_cache_count++;
+ vcpu3s->hpte_cache_count++;
- if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
+ if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
kvmppc_mmu_pte_flush_all(vcpu);
return pte;
@@ -309,17 +320,19 @@ static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+
/* init hpte lookup hashes */
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
- ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
- ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
- ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
- kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
- ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
-
- spin_lock_init(&vcpu->arch.mmu_lock);
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
+ ARRAY_SIZE(vcpu3s->hpte_hash_pte));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
+ ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
+ ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
+ kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
+ ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
+
+ spin_lock_init(&vcpu3s->mmu_lock);
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
new file mode 100644
index 000000000000..0c0d3f274437
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ * Kevin Wolf <mail@kevin-wolf.de>
+ * Paul Mackerras <paulus@samba.org>
+ *
+ * Description:
+ * Functions relating to running KVM on Book 3S processors where
+ * we don't have access to hypervisor mode, and we run the guest
+ * in problem state (user mode).
+ *
+ * This file is derived from arch/powerpc/kvm/44x.c,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu_context.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+
+#include "trace.h"
+
+/* #define EXIT_DEBUG */
+/* #define DEBUG_EXT */
+
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr);
+
+/* Some compatibility defines */
+#ifdef CONFIG_PPC_BOOK3S_32
+#define MSR_USER32 MSR_USER
+#define MSR_USER64 MSR_USER
+#define HW_PAGE_SIZE PAGE_SIZE
+#endif
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
+ to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
+#endif
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb));
+ memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
+ to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
+#endif
+
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ kvmppc_giveup_ext(vcpu, MSR_VEC);
+ kvmppc_giveup_ext(vcpu, MSR_VSX);
+}
+
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+ ulong smsr = vcpu->arch.shared->msr;
+
+ /* Guest MSR values */
+ smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
+ /* Process MSR values */
+ smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
+ /* External provider bits the guest currently owns */
+ smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
+ /* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+ smsr |= MSR_ISF | MSR_HV;
+#endif
+ vcpu->arch.shadow_msr = smsr;
+}
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+ ulong old_msr = vcpu->arch.shared->msr;
+
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
+#endif
+
+ msr &= to_book3s(vcpu)->msr_mask;
+ vcpu->arch.shared->msr = msr;
+ kvmppc_recalc_shadow_msr(vcpu);
+
+ if (msr & MSR_POW) {
+ if (!vcpu->arch.pending_exceptions) {
+ kvm_vcpu_block(vcpu);
+ vcpu->stat.halt_wakeup++;
+
+ /* Unset POW bit after we woke up */
+ msr &= ~MSR_POW;
+ vcpu->arch.shared->msr = msr;
+ }
+ }
+
+ if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+ (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
+ kvmppc_mmu_flush_segments(vcpu);
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+
+ /* Preload magic page segment when in kernel mode */
+ if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
+ struct kvm_vcpu_arch *a = &vcpu->arch;
+
+ if (msr & MSR_DR)
+ kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
+ else
+ kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
+ }
+ }
+
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.shared->msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+}
+
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+{
+ u32 host_pvr;
+
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
+ vcpu->arch.pvr = pvr;
+#ifdef CONFIG_PPC_BOOK3S_64
+ if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
+ kvmppc_mmu_book3s_64_init(vcpu);
+ to_book3s(vcpu)->hior = 0xfff00000;
+ to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
+ } else
+#endif
+ {
+ kvmppc_mmu_book3s_32_init(vcpu);
+ to_book3s(vcpu)->hior = 0;
+ to_book3s(vcpu)->msr_mask = 0xffffffffULL;
+ }
+
+ /* If we are running at hypervisor level on a 970, we can tell the CPU to
+ * treat DCBZ as a 32-byte store */
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
+ !strcmp(cur_cpu_spec->platform, "ppc970"))
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+
+ /* Cell performs badly if MSR_FEx are set. So let's hope nobody
+ really needs them in a VM on Cell and force disable them. */
+ if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
+ to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* 32-bit Book3S always has a 32-byte dcbz */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+#endif
+
+ /* On some CPUs we can execute paired single operations natively */
+ asm ( "mfpvr %0" : "=r"(host_pvr));
+ switch (host_pvr) {
+ case 0x00080200: /* lonestar 2.0 */
+ case 0x00088202: /* lonestar 2.2 */
+ case 0x70000100: /* gekko 1.0 */
+ case 0x00080100: /* gekko 2.0 */
+ case 0x00083203: /* gekko 2.3a */
+ case 0x00083213: /* gekko 2.3b */
+ case 0x00083204: /* gekko 2.4 */
+ case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
+ case 0x00087200: /* broadway */
+ vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
+ /* Enable HID2.PSE - in case we need it later */
+ mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
+ }
+}
+
+/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
+ * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
+ * emulate the 32-byte dcbz length.
+ *
+ * The Book3s_64 designers also anticipated this case and implemented a special bit
+ * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
+ *
+ * My approach here is to patch the dcbz instruction on executing pages.
+ */
+static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+ struct page *hpage;
+ u64 hpage_offset;
+ u32 *page;
+ int i;
+
+ hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+ if (is_error_page(hpage)) {
+ kvm_release_page_clean(hpage);
+ return;
+ }
+
+ hpage_offset = pte->raddr & ~PAGE_MASK;
+ hpage_offset &= ~0xFFFULL;
+ hpage_offset /= 4;
+
+ get_page(hpage);
+ page = kmap_atomic(hpage, KM_USER0);
+
+ /* patch dcbz into reserved instruction, so we trap */
+ for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
+ if ((page[i] & 0xff0007ff) == INS_DCBZ)
+ page[i] &= 0xfffffff7;
+
+ kunmap_atomic(page, KM_USER0);
+ put_page(hpage);
+}
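
A minimal stand-alone C sketch of the patching idea above; this is not part of the patch, and the INS_DCBZ value is an assumption (the usual 0x7c0007ec encoding of dcbz), while the mask and the cleared bit are taken from the loop in kvmppc_patch_dcbz():

    /* Illustrative only: flip matching dcbz words to a reserved form. */
    #include <stdint.h>
    #include <stdio.h>

    #define INS_DCBZ 0x7c0007ecU   /* assumed dcbz encoding (primary 31, XO 1014) */

    static void patch_dcbz(uint32_t *page, int nwords)
    {
        int i;

        for (i = 0; i < nwords; i++)
            if ((page[i] & 0xff0007ff) == INS_DCBZ) /* ignore register fields */
                page[i] &= 0xfffffff7;              /* reserved form, so it traps */
    }

    int main(void)
    {
        uint32_t code[2] = { 0x7c001fec /* dcbz r0,r3 */, 0x60000000 /* nop */ };

        patch_dcbz(code, 2);
        printf("%08x %08x\n", (unsigned)code[0], (unsigned)code[1]);
        return 0;
    }

The patched word raises a program interrupt in the guest; the program-interrupt path in the exit handler below checks for exactly this patched encoding before deciding whether to emulate it as a 32-byte dcbz.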
+
+static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ ulong mp_pa = vcpu->arch.magic_page_pa;
+
+ if (unlikely(mp_pa) &&
+ unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+ return 1;
+ }
+
+ return kvm_is_visible_gfn(vcpu->kvm, gfn);
+}
+
+int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ ulong eaddr, int vec)
+{
+ bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+ int r = RESUME_GUEST;
+ int relocated;
+ int page_found = 0;
+ struct kvmppc_pte pte;
+ bool is_mmio = false;
+ bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+ bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
+ u64 vsid;
+
+ relocated = data ? dr : ir;
+
+ /* Resolve real address if translation turned on */
+ if (relocated) {
+ page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+ } else {
+ pte.may_execute = true;
+ pte.may_read = true;
+ pte.may_write = true;
+ pte.raddr = eaddr & KVM_PAM;
+ pte.eaddr = eaddr;
+ pte.vpage = eaddr >> 12;
+ }
+
+ switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+ case 0:
+ pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
+ break;
+ case MSR_DR:
+ case MSR_IR:
+ vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+ if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
+ pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+ else
+ pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+ pte.vpage |= vsid;
+
+ if (vsid == -1)
+ page_found = -EINVAL;
+ break;
+ }
+
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
+ /*
+ * If we do the dcbz hack, we have to NX on every execution,
+ * so we can patch the executing code. This renders our guest
+ * NX-less.
+ */
+ pte.may_execute = !data;
+ }
+
+ if (page_found == -ENOENT) {
+ /* Page not found in guest PTE entries */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ vcpu->arch.shared->msr |=
+ (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ } else if (page_found == -EPERM) {
+ /* Storage protection */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->arch.shared->dsisr =
+ to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
+ vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
+ vcpu->arch.shared->msr |=
+ (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ } else if (page_found == -EINVAL) {
+ /* Page not found in guest SLB */
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
+ } else if (!is_mmio &&
+ kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+ /* The guest's PTE is not mapped yet. Map on the host */
+ kvmppc_mmu_map_page(vcpu, &pte);
+ if (data)
+ vcpu->stat.sp_storage++;
+ else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
+ kvmppc_patch_dcbz(vcpu, &pte);
+ } else {
+ /* MMIO */
+ vcpu->stat.mmio_exits++;
+ vcpu->arch.paddr_accessed = pte.raddr;
+ r = kvmppc_emulate_mmio(run, vcpu);
+ if ( r == RESUME_HOST_NV )
+ r = RESUME_HOST;
+ }
+
+ return r;
+}
+
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+ i *= 2;
+#endif
+ return i;
+}
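
When the kernel is built with VSX, the host thread's FP save area holds each floating-point register as the high half of a 16-byte vector-scalar slot, so FPR i lives at doubleword index 2*i and the low half of VSR i at 2*i + 1, which is all get_fpr_index() encodes. A small stand-alone illustration (not from the patch; the two-doublewords-per-register layout is the assumption here):

    #include <stdio.h>

    #define CONFIG_VSX 1    /* assume a VSX-capable build */

    static int get_fpr_index(int i)
    {
    #ifdef CONFIG_VSX
        i *= 2;             /* each register occupies two doublewords */
    #endif
        return i;
    }

    int main(void)
    {
        int i = 3;

        /* FPR3 -> thread_fpr[6], low half of VSR3 -> thread_fpr[7] */
        printf("FPR%d -> [%d], VSR%d low -> [%d]\n",
               i, get_fpr_index(i), i, get_fpr_index(i) + 1);
        return 0;
    }

kvmppc_giveup_ext() and kvmppc_handle_ext() below use get_fpr_index(i) for the FP half and get_fpr_index(i) + 1 for the VSX half of register i.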
+
+/* Give up external provider (FPU, Altivec, VSX) */
+void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+ struct thread_struct *t = &current->thread;
+ u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
+ u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
+ u64 *thread_fpr = (u64*)t->fpr;
+ int i;
+
+ if (!(vcpu->arch.guest_owned_ext & msr))
+ return;
+
+#ifdef DEBUG_EXT
+ printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+ switch (msr) {
+ case MSR_FP:
+ giveup_fpu(current);
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+ vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+ vcpu->arch.fpscr = t->fpscr.val;
+ break;
+ case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+ giveup_altivec(current);
+ memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+ vcpu->arch.vscr = t->vscr;
+#endif
+ break;
+ case MSR_VSX:
+#ifdef CONFIG_VSX
+ __giveup_vsx(current);
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+ vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+ break;
+ default:
+ BUG();
+ }
+
+ vcpu->arch.guest_owned_ext &= ~msr;
+ current->thread.regs->msr &= ~msr;
+ kvmppc_recalc_shadow_msr(vcpu);
+}
+
+static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
+{
+ ulong srr0 = kvmppc_get_pc(vcpu);
+ u32 last_inst = kvmppc_get_last_inst(vcpu);
+ int ret;
+
+ ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+ if (ret == -ENOENT) {
+ ulong msr = vcpu->arch.shared->msr;
+
+ msr = kvmppc_set_field(msr, 33, 33, 1);
+ msr = kvmppc_set_field(msr, 34, 36, 0);
+ vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
+ return EMULATE_AGAIN;
+ }
+
+ return EMULATE_DONE;
+}
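
kvmppc_read_inst() manufactures the SRR1 value a real instruction storage interrupt would leave behind (bit 33 set, bits 34-36 and 42-47 cleared, in IBM MSB-0 numbering) before reflecting the fault into the guest. A hedged sketch of an MSB-0 field insert, on the assumption that kvmppc_set_field() follows that convention; the helper name below is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Insert 'val' into bits msb..lsb of x, bit 0 being the most significant. */
    static uint64_t set_field_msb0(uint64_t x, int msb, int lsb, uint64_t val)
    {
        int width = lsb - msb + 1;
        uint64_t mask = (width == 64 ? ~0ULL : (1ULL << width) - 1) << (63 - lsb);

        return (x & ~mask) | ((val << (63 - lsb)) & mask);
    }

    int main(void)
    {
        uint64_t msr = 0;

        msr = set_field_msb0(msr, 33, 33, 1);
        printf("0x%016llx\n", (unsigned long long)msr);  /* 0x0000000040000000 */
        return 0;
    }

Bit 33 corresponds to the 0x40000000 "no HPTE found" flag that the exit handler later tests in shadow_srr1.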
+
+static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
+{
+
+ /* Need to do paired single emulation? */
+ if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
+ return EMULATE_DONE;
+
+ /* Read out the instruction */
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
+ /* Need to emulate */
+ return EMULATE_FAIL;
+
+ return EMULATE_AGAIN;
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+ ulong msr)
+{
+ struct thread_struct *t = &current->thread;
+ u64 *vcpu_fpr = vcpu->arch.fpr;
+#ifdef CONFIG_VSX
+ u64 *vcpu_vsx = vcpu->arch.vsr;
+#endif
+ u64 *thread_fpr = (u64*)t->fpr;
+ int i;
+
+ /* When we have paired singles, we emulate in software */
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
+ return RESUME_GUEST;
+
+ if (!(vcpu->arch.shared->msr & msr)) {
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ return RESUME_GUEST;
+ }
+
+ /* We already own the ext */
+ if (vcpu->arch.guest_owned_ext & msr) {
+ return RESUME_GUEST;
+ }
+
+#ifdef DEBUG_EXT
+ printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+ current->thread.regs->msr |= msr;
+
+ switch (msr) {
+ case MSR_FP:
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+ thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+ t->fpscr.val = vcpu->arch.fpscr;
+ t->fpexc_mode = 0;
+ kvmppc_load_up_fpu();
+ break;
+ case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+ memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+ t->vscr = vcpu->arch.vscr;
+ t->vrsave = -1;
+ kvmppc_load_up_altivec();
+#endif
+ break;
+ case MSR_VSX:
+#ifdef CONFIG_VSX
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+ thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+ kvmppc_load_up_vsx();
+#endif
+ break;
+ default:
+ BUG();
+ }
+
+ vcpu->arch.guest_owned_ext |= msr;
+
+ kvmppc_recalc_shadow_msr(vcpu);
+
+ return RESUME_GUEST;
+}
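
kvmppc_giveup_ext() above and kvmppc_handle_ext() here implement lazy switching of FP/Altivec/VSX: a facility stays disabled until the guest actually faults on it, and only then is the guest register image copied into the host thread and the unit turned on. A toy model of that decision, purely illustrative (all names below are invented, not from the patch):

    #include <stdio.h>

    #define MSR_FP 0x2000UL         /* MSR floating-point enable bit */

    struct toy_vcpu {
        unsigned long guest_msr;    /* what the guest thinks it enabled */
        unsigned long owned_ext;    /* facilities currently loaded for it */
    };

    static const char *on_fp_unavailable(struct toy_vcpu *v)
    {
        if (!(v->guest_msr & MSR_FP))
            return "guest did not enable FP: reflect the interrupt into the guest";
        if (v->owned_ext & MSR_FP)
            return "guest already owns FP: nothing to do";
        v->owned_ext |= MSR_FP;
        return "copy the vcpu FP image into the host thread and enable FP";
    }

    int main(void)
    {
        struct toy_vcpu v = { .guest_msr = MSR_FP, .owned_ext = 0 };

        printf("%s\n", on_fp_unavailable(&v));
        return 0;
    }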
+
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ int r = RESUME_HOST;
+
+ vcpu->stat.sum_exits++;
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ trace_kvm_book3s_exit(exit_nr, vcpu);
+ kvm_resched(vcpu);
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_INST_STORAGE:
+ vcpu->stat.pf_instruc++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We mark segments as unused when invalidating them, so
+ * treat the respective fault as a segment fault. */
+ if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
+ == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
+ /* only care about PTEG not found errors, but leave NX alone */
+ if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
+ r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+ vcpu->stat.sp_instruc++;
+ } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
+ /*
+ * XXX If we do the dcbz hack, we use the NX bit to flush and patch the page,
+ * so we can't use the NX bit inside the guest. Let's cross our fingers
+ * that no guest that needs the dcbz hack also relies on NX.
+ */
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
+ r = RESUME_GUEST;
+ } else {
+ vcpu->arch.shared->msr |=
+ to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ case BOOK3S_INTERRUPT_DATA_STORAGE:
+ {
+ ulong dar = kvmppc_get_fault_dar(vcpu);
+ vcpu->stat.pf_storage++;
+
+#ifdef CONFIG_PPC_BOOK3S_32
+ /* We mark segments as unused when invalidating them, so
+ * treat the respective fault as a segment fault. */
+ if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
+ kvmppc_mmu_map_segment(vcpu, dar);
+ r = RESUME_GUEST;
+ break;
+ }
+#endif
+
+ /* The only case we need to handle is missing shadow PTEs */
+ if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
+ r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+ } else {
+ vcpu->arch.shared->dar = dar;
+ vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_DATA_SEGMENT:
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
+ vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_DATA_SEGMENT);
+ }
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_INST_SEGMENT:
+ if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_INST_SEGMENT);
+ }
+ r = RESUME_GUEST;
+ break;
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PERFMON:
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PROGRAM:
+ {
+ enum emulation_result er;
+ ulong flags;
+
+program_interrupt:
+ flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
+
+ if (vcpu->arch.shared->msr & MSR_PR) {
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+#endif
+ if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
+ (INS_DCBZ & 0xfffffff7)) {
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ }
+ }
+
+ vcpu->stat.emulated_inst_exits++;
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_AGAIN:
+ r = RESUME_GUEST;
+ break;
+ case EMULATE_FAIL:
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+ __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
+ kvmppc_core_queue_program(vcpu, flags);
+ r = RESUME_GUEST;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ r = RESUME_HOST_NV;
+ break;
+ default:
+ BUG();
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_SYSCALL:
+ if (vcpu->arch.osi_enabled &&
+ (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
+ (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
+ /* MOL hypercalls */
+ u64 *gprs = run->osi.gprs;
+ int i;
+
+ run->exit_reason = KVM_EXIT_OSI;
+ for (i = 0; i < 32; i++)
+ gprs[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu->arch.osi_needed = 1;
+ r = RESUME_HOST_NV;
+ } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
+ /* KVM PV hypercalls */
+ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+ r = RESUME_GUEST;
+ } else {
+ /* Guest syscalls */
+ vcpu->stat.syscall_exits++;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ }
+ break;
+ case BOOK3S_INTERRUPT_FP_UNAVAIL:
+ case BOOK3S_INTERRUPT_ALTIVEC:
+ case BOOK3S_INTERRUPT_VSX:
+ {
+ int ext_msr = 0;
+
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
+ case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break;
+ case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break;
+ }
+
+ switch (kvmppc_check_ext(vcpu, exit_nr)) {
+ case EMULATE_DONE:
+ /* everything ok - let's enable the ext */
+ r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
+ break;
+ case EMULATE_FAIL:
+ /* we need to emulate this instruction */
+ goto program_interrupt;
+ break;
+ default:
+ /* nothing to worry about - go again */
+ break;
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_ALIGNMENT:
+ if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
+ vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
+ kvmppc_get_last_inst(vcpu));
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ }
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ case BOOK3S_INTERRUPT_TRACE:
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ break;
+ default:
+ /* Ugh - bork here! What did we get? */
+ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+ exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
+ r = RESUME_HOST;
+ BUG();
+ break;
+ }
+
+
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(current)) {
+#ifdef EXIT_DEBUG
+ printk(KERN_EMERG "KVM: Going back to host\n");
+#endif
+ vcpu->stat.signal_exits++;
+ run->exit_reason = KVM_EXIT_INTR;
+ r = -EINTR;
+ } else {
+ /* In case an interrupt came in that was triggered
+ * from userspace (like DEC), we need to check what
+ * to inject now! */
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ }
+
+ trace_kvm_book3s_reenter(r, vcpu);
+
+ return r;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
+ sregs->pvr = vcpu->arch.pvr;
+
+ sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
+ sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
+ }
+ } else {
+ for (i = 0; i < 16; i++)
+ sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+
+ for (i = 0; i < 8; i++) {
+ sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
+ sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
+ }
+ }
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
+ kvmppc_set_pvr(vcpu, sregs->pvr);
+
+ vcpu3s->sdr1 = sregs->u.s.sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
+ sregs->u.s.ppc64.slb[i].slbe);
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
+ }
+ for (i = 0; i < 8; i++) {
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
+ (u32)sregs->u.s.ppc32.ibat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
+ (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
+ (u32)sregs->u.s.ppc32.dbat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
+ (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
+ }
+ }
+
+ /* Flush the MMU after messing with the segments */
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ return 0;
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s;
+ struct kvm_vcpu *vcpu;
+ int err = -ENOMEM;
+ unsigned long p;
+
+ vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
+ if (!vcpu_book3s)
+ goto out;
+
+ vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
+ kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
+ if (!vcpu_book3s->shadow_vcpu)
+ goto free_vcpu;
+
+ vcpu = &vcpu_book3s->vcpu;
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_shadow_vcpu;
+
+ p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+ /* the real shared page fills the last 4k of our page */
+ vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
+ if (!p)
+ goto uninit_vcpu;
+
+ vcpu->arch.host_retip = kvm_return_point;
+ vcpu->arch.host_msr = mfmsr();
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* default to book3s_64 (970fx) */
+ vcpu->arch.pvr = 0x3C0301;
+#else
+ /* default to book3s_32 (750) */
+ vcpu->arch.pvr = 0x84202;
+#endif
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+ vcpu->arch.slb_nr = 64;
+
+ /* remember where some real-mode handlers are */
+ vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
+ vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter);
+ vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+#ifdef CONFIG_PPC_BOOK3S_64
+ vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
+#else
+ vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
+#endif
+
+ vcpu->arch.shadow_msr = MSR_USER64;
+
+ err = kvmppc_mmu_init(vcpu);
+ if (err < 0)
+ goto uninit_vcpu;
+
+ return vcpu;
+
+uninit_vcpu:
+ kvm_vcpu_uninit(vcpu);
+free_shadow_vcpu:
+ kfree(vcpu_book3s->shadow_vcpu);
+free_vcpu:
+ vfree(vcpu_book3s);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+
+ free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
+ kvm_vcpu_uninit(vcpu);
+ kfree(vcpu_book3s->shadow_vcpu);
+ vfree(vcpu_book3s);
+}
+
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ret;
+ double fpr[32][TS_FPRWIDTH];
+ unsigned int fpscr;
+ int fpexc_mode;
+#ifdef CONFIG_ALTIVEC
+ vector128 vr[32];
+ vector128 vscr;
+ unsigned long uninitialized_var(vrsave);
+ int used_vr;
+#endif
+#ifdef CONFIG_VSX
+ int used_vsr;
+#endif
+ ulong ext_msr;
+
+ /* No need to go into the guest when all we would do is go right back out */
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+ /* Save FPU state in stack */
+ if (current->thread.regs->msr & MSR_FP)
+ giveup_fpu(current);
+ memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+ fpscr = current->thread.fpscr.val;
+ fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+ /* Save Altivec state in stack */
+ used_vr = current->thread.used_vr;
+ if (used_vr) {
+ if (current->thread.regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+ vscr = current->thread.vscr;
+ vrsave = current->thread.vrsave;
+ }
+#endif
+
+#ifdef CONFIG_VSX
+ /* Save VSX state in stack */
+ used_vsr = current->thread.used_vsr;
+ if (used_vsr && (current->thread.regs->msr & MSR_VSX))
+ __giveup_vsx(current);
+#endif
+
+ /* Remember the MSR with disabled extensions */
+ ext_msr = current->thread.regs->msr;
+
+ /* Preload FPU if it's enabled */
+ if (vcpu->arch.shared->msr & MSR_FP)
+ kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+
+ kvm_guest_enter();
+
+ ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+ kvm_guest_exit();
+
+ local_irq_disable();
+
+ current->thread.regs->msr = ext_msr;
+
+ /* Make sure we save the guest FPU/Altivec/VSX state */
+ kvmppc_giveup_ext(vcpu, MSR_FP);
+ kvmppc_giveup_ext(vcpu, MSR_VEC);
+ kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+ /* Restore FPU state from stack */
+ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+ current->thread.fpscr.val = fpscr;
+ current->thread.fpexc_mode = fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+ /* Restore Altivec state from stack */
+ if (used_vr && current->thread.used_vr) {
+ memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+ current->thread.vscr = vscr;
+ current->thread.vrsave = vrsave;
+ }
+ current->thread.used_vr = used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+ current->thread.used_vsr = used_vsr;
+#endif
+
+ return ret;
+}
+
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ return 0;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
+static int kvmppc_book3s_init(void)
+{
+ int r;
+
+ r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+ THIS_MODULE);
+
+ if (r)
+ return r;
+
+ r = kvmppc_mmu_hpte_sysinit();
+
+ return r;
+}
+
+static void kvmppc_book3s_exit(void)
+{
+ kvmppc_mmu_hpte_sysexit();
+ kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 1a1b34487e71..c1f877c4a884 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,41 +36,44 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg)
-#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
#define FUNC(name) GLUE(.,name)
+kvmppc_skip_interrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_SRR0
+ addi r13, r13, 4
+ mtspr SPRN_SRR0, r13
+ GET_SCRATCH0(r13)
+ rfid
+ b .
+
+kvmppc_skip_Hinterrupt:
+ /*
+ * Here all GPRs are unchanged from when the interrupt happened
+ * except for r13, which is saved in SPRG_SCRATCH0.
+ */
+ mfspr r13, SPRN_HSRR0
+ addi r13, r13, 4
+ mtspr SPRN_HSRR0, r13
+ GET_SCRATCH0(r13)
+ hrfid
+ b .
+
#elif defined(CONFIG_PPC_BOOK3S_32)
-#define LOAD_SHADOW_VCPU(reg) \
- mfspr reg, SPRN_SPRG_THREAD; \
- lwz reg, THREAD_KVM_SVCPU(reg); \
- /* PPC32 can have a NULL pointer - let's check for that */ \
- mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
- mfcr r12; \
- cmpwi reg, 0; \
- bne 1f; \
- mfspr reg, SPRN_SPRG_SCRATCH0; \
- mtcr r12; \
- mfspr r12, SPRN_SPRG_SCRATCH1; \
- b kvmppc_resume_\intno; \
-1:; \
- mtcr r12; \
- mfspr r12, SPRN_SPRG_SCRATCH1; \
- tophys(reg, reg)
-
-#define SHADOW_VCPU_OFF 0
#define MSR_NOIRQ MSR_KERNEL
#define FUNC(name) name
-#endif
-
.macro INTERRUPT_TRAMPOLINE intno
.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:
- SET_SCRATCH0(r13) /* Save r13 */
+ mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
/*
* First thing to do is to find out if we're coming
@@ -78,19 +81,28 @@ kvmppc_trampoline_\intno:
*
* To distinguish, we check a magic byte in the PACA/current
*/
- LOAD_SHADOW_VCPU(r13)
- PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+ mfspr r13, SPRN_SPRG_THREAD
+ lwz r13, THREAD_KVM_SVCPU(r13)
+ /* PPC32 can have a NULL pointer - let's check for that */
+ mtspr SPRN_SPRG_SCRATCH1, r12 /* Save r12 */
mfcr r12
- stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
- lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ cmpwi r13, 0
+ bne 1f
+2: mtcr r12
+ mfspr r12, SPRN_SPRG_SCRATCH1
+ mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
+ b kvmppc_resume_\intno /* Get back original handler */
+
+1: tophys(r13, r13)
+ stw r12, HSTATE_SCRATCH1(r13)
+ mfspr r12, SPRN_SPRG_SCRATCH1
+ stw r12, HSTATE_SCRATCH0(r13)
+ lbz r12, HSTATE_IN_GUEST(r13)
cmpwi r12, KVM_GUEST_MODE_NONE
bne ..kvmppc_handler_hasmagic_\intno
/* No KVM guest? Then jump back to the Linux handler! */
- lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
- mtcr r12
- PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
- GET_SCRATCH0(r13) /* r13 = original r13 */
- b kvmppc_resume_\intno /* Get back original handler */
+ lwz r12, HSTATE_SCRATCH1(r13)
+ b 2b
/* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:
@@ -112,9 +124,6 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
-#ifdef CONFIG_PPC_BOOK3S_64
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV
-#endif
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
@@ -124,14 +133,6 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
-/* Those are only available on 64 bit machines */
-
-#ifdef CONFIG_PPC_BOOK3S_64
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
-INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
-#endif
-
/*
* Bring us back to the faulting code, but skip the
* faulting instruction.
@@ -143,8 +144,8 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
*
* R12 = free
* R13 = Shadow VCPU (PACA)
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
* SPRG_SCRATCH0 = guest R13
*
*/
@@ -156,13 +157,14 @@ kvmppc_handler_skip_ins:
mtsrr0 r12
/* Clean up all state */
- lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+ lwz r12, HSTATE_SCRATCH1(r13)
mtcr r12
- PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+ PPC_LL r12, HSTATE_SCRATCH0(r13)
GET_SCRATCH0(r13)
/* And get back into the code */
RFI
+#endif
/*
* This trampoline brings us back to a real mode handler
@@ -251,12 +253,4 @@ define_load_up(altivec)
define_load_up(vsx)
#endif
-.global kvmppc_trampoline_lowmem
-kvmppc_trampoline_lowmem:
- PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
-
-.global kvmppc_trampoline_enter
-kvmppc_trampoline_enter:
- PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
-
#include "book3s_segment.S"
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 451264274b8c..aed32e517212 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -22,7 +22,7 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define GET_SHADOW_VCPU(reg) \
- addi reg, r13, PACA_KVM_SVCPU
+ mr reg, r13
#elif defined(CONFIG_PPC_BOOK3S_32)
@@ -71,6 +71,10 @@ kvmppc_handler_trampoline_enter:
/* r3 = shadow vcpu */
GET_SHADOW_VCPU(r3)
+ /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
+ PPC_STL r1, HSTATE_HOST_R1(r3)
+ PPC_STL r2, HSTATE_HOST_R2(r3)
+
/* Move SRR0 and SRR1 into the respective regs */
PPC_LL r9, SVCPU_PC(r3)
mtsrr0 r9
@@ -78,36 +82,36 @@ kvmppc_handler_trampoline_enter:
/* Activate guest mode, so faults get handled by KVM */
li r11, KVM_GUEST_MODE_GUEST
- stb r11, SVCPU_IN_GUEST(r3)
+ stb r11, HSTATE_IN_GUEST(r3)
/* Switch to guest segment. This is subarch specific. */
LOAD_GUEST_SEGMENTS
/* Enter guest */
- PPC_LL r4, (SVCPU_CTR)(r3)
- PPC_LL r5, (SVCPU_LR)(r3)
- lwz r6, (SVCPU_CR)(r3)
- lwz r7, (SVCPU_XER)(r3)
+ PPC_LL r4, SVCPU_CTR(r3)
+ PPC_LL r5, SVCPU_LR(r3)
+ lwz r6, SVCPU_CR(r3)
+ lwz r7, SVCPU_XER(r3)
mtctr r4
mtlr r5
mtcr r6
mtxer r7
- PPC_LL r0, (SVCPU_R0)(r3)
- PPC_LL r1, (SVCPU_R1)(r3)
- PPC_LL r2, (SVCPU_R2)(r3)
- PPC_LL r4, (SVCPU_R4)(r3)
- PPC_LL r5, (SVCPU_R5)(r3)
- PPC_LL r6, (SVCPU_R6)(r3)
- PPC_LL r7, (SVCPU_R7)(r3)
- PPC_LL r8, (SVCPU_R8)(r3)
- PPC_LL r9, (SVCPU_R9)(r3)
- PPC_LL r10, (SVCPU_R10)(r3)
- PPC_LL r11, (SVCPU_R11)(r3)
- PPC_LL r12, (SVCPU_R12)(r3)
- PPC_LL r13, (SVCPU_R13)(r3)
+ PPC_LL r0, SVCPU_R0(r3)
+ PPC_LL r1, SVCPU_R1(r3)
+ PPC_LL r2, SVCPU_R2(r3)
+ PPC_LL r4, SVCPU_R4(r3)
+ PPC_LL r5, SVCPU_R5(r3)
+ PPC_LL r6, SVCPU_R6(r3)
+ PPC_LL r7, SVCPU_R7(r3)
+ PPC_LL r8, SVCPU_R8(r3)
+ PPC_LL r9, SVCPU_R9(r3)
+ PPC_LL r10, SVCPU_R10(r3)
+ PPC_LL r11, SVCPU_R11(r3)
+ PPC_LL r12, SVCPU_R12(r3)
+ PPC_LL r13, SVCPU_R13(r3)
PPC_LL r3, (SVCPU_R3)(r3)
@@ -125,56 +129,63 @@ kvmppc_handler_trampoline_enter_end:
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
+.global kvmppc_interrupt
+kvmppc_interrupt:
+
/* Register usage at this point:
*
* SPRG_SCRATCH0 = guest R13
* R12 = exit handler id
- * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
- * SVCPU.SCRATCH0 = guest R12
- * SVCPU.SCRATCH1 = guest CR
+ * R13 = shadow vcpu (32-bit) or PACA (64-bit)
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
*
*/
/* Save registers */
- PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13)
- PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13)
- PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13)
- PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13)
- PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13)
- PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13)
- PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13)
- PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13)
- PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13)
- PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13)
+ PPC_STL r0, SVCPU_R0(r13)
+ PPC_STL r1, SVCPU_R1(r13)
+ PPC_STL r2, SVCPU_R2(r13)
+ PPC_STL r3, SVCPU_R3(r13)
+ PPC_STL r4, SVCPU_R4(r13)
+ PPC_STL r5, SVCPU_R5(r13)
+ PPC_STL r6, SVCPU_R6(r13)
+ PPC_STL r7, SVCPU_R7(r13)
+ PPC_STL r8, SVCPU_R8(r13)
+ PPC_STL r9, SVCPU_R9(r13)
+ PPC_STL r10, SVCPU_R10(r13)
+ PPC_STL r11, SVCPU_R11(r13)
/* Restore R1/R2 so we can handle faults */
- PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13)
- PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13)
+ PPC_LL r1, HSTATE_HOST_R1(r13)
+ PPC_LL r2, HSTATE_HOST_R2(r13)
/* Save guest PC and MSR */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
andi. r0,r12,0x2
beq 1f
mfspr r3,SPRN_HSRR0
mfspr r4,SPRN_HSRR1
andi. r12,r12,0x3ffd
b 2f
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
1: mfsrr0 r3
mfsrr1 r4
2:
- PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13)
- PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13)
+ PPC_STL r3, SVCPU_PC(r13)
+ PPC_STL r4, SVCPU_SHADOW_SRR1(r13)
/* Get scratch'ed off registers */
GET_SCRATCH0(r9)
- PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
- lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+ PPC_LL r8, HSTATE_SCRATCH0(r13)
+ lwz r7, HSTATE_SCRATCH1(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13)
- stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13)
+ PPC_STL r9, SVCPU_R13(r13)
+ PPC_STL r8, SVCPU_R12(r13)
+ stw r7, SVCPU_CR(r13)
/* Save more register state */
@@ -184,11 +195,11 @@ kvmppc_handler_trampoline_exit:
mfctr r8
mflr r9
- stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13)
- PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13)
- stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13)
- PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13)
- PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13)
+ stw r5, SVCPU_XER(r13)
+ PPC_STL r6, SVCPU_FAULT_DAR(r13)
+ stw r7, SVCPU_FAULT_DSISR(r13)
+ PPC_STL r8, SVCPU_CTR(r13)
+ PPC_STL r9, SVCPU_LR(r13)
/*
* In order for us to easily get the last instruction,
@@ -218,7 +229,7 @@ ld_last_inst:
/* Set guest mode to 'jump over instruction' so if lwz faults
* we'll just continue at the next IP. */
li r9, KVM_GUEST_MODE_SKIP
- stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ stb r9, HSTATE_IN_GUEST(r13)
/* 1) enable paging for data */
mfmsr r9
@@ -232,13 +243,13 @@ ld_last_inst:
sync
#endif
- stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13)
+ stw r0, SVCPU_LAST_INST(r13)
no_ld_last_inst:
/* Unset guest mode */
li r9, KVM_GUEST_MODE_NONE
- stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+ stb r9, HSTATE_IN_GUEST(r13)
/* Switch back to host MMU */
LOAD_HOST_SEGMENTS
@@ -248,7 +259,7 @@ no_ld_last_inst:
* R1 = host R1
* R2 = host R2
* R12 = exit handler id
- * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+ * R13 = shadow vcpu (32-bit) or PACA (64-bit)
* SVCPU.* = guest *
*
*/
@@ -258,7 +269,7 @@ no_ld_last_inst:
ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */
mtsrr1 r7
/* Load highmem handler address */
- PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13)
+ PPC_LL r8, HSTATE_VMHANDLER(r13)
mtsrr0 r8
RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8462b3a1c1c7..ee45fa01220e 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -78,6 +79,60 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
}
}
+#ifdef CONFIG_SPE
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ enable_kernel_spe();
+ kvmppc_save_guest_spe(vcpu);
+ vcpu->arch.shadow_msr &= ~MSR_SPE;
+ preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ enable_kernel_spe();
+ kvmppc_load_guest_spe(vcpu);
+ vcpu->arch.shadow_msr |= MSR_SPE;
+ preempt_enable();
+}
+
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.shared->msr & MSR_SPE) {
+ if (!(vcpu->arch.shadow_msr & MSR_SPE))
+ kvmppc_vcpu_enable_spe(vcpu);
+ } else if (vcpu->arch.shadow_msr & MSR_SPE) {
+ kvmppc_vcpu_disable_spe(vcpu);
+ }
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+/*
+ * Helper function for "full" MSR writes. No need to call this if only
+ * EE/CE/ME/DE/RI are changing.
+ */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+ u32 old_msr = vcpu->arch.shared->msr;
+
+ vcpu->arch.shared->msr = new_msr;
+
+ kvmppc_mmu_msr_notify(vcpu, old_msr);
+
+ if (vcpu->arch.shared->msr & MSR_WE) {
+ kvm_vcpu_block(vcpu);
+ kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+ };
+
+ kvmppc_vcpu_sync_spe(vcpu);
+}
+
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
unsigned int priority)
{
@@ -257,6 +312,19 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
vcpu->arch.shared->int_pending = 0;
}
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ local_irq_disable();
+ kvm_guest_enter();
+ ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ kvm_guest_exit();
+ local_irq_enable();
+
+ return ret;
+}
+
/**
* kvmppc_handle_exit
*
@@ -344,10 +412,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
- case BOOKE_INTERRUPT_SPE_UNAVAIL:
- kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+ case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+ if (vcpu->arch.shared->msr & MSR_SPE)
+ kvmppc_vcpu_enable_spe(vcpu);
+ else
+ kvmppc_booke_queue_irqprio(vcpu,
+ BOOKE_IRQPRIO_SPE_UNAVAIL);
r = RESUME_GUEST;
break;
+ }
case BOOKE_INTERRUPT_SPE_FP_DATA:
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +432,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST;
break;
+#else
+ case BOOKE_INTERRUPT_SPE_UNAVAIL:
+ /*
+ * Guest wants SPE, but host kernel doesn't support it. Send
+ * an "unimplemented operation" program check to the guest.
+ */
+ kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+ r = RESUME_GUEST;
+ break;
+
+ /*
+ * These really should never happen without CONFIG_SPE,
+ * as we should never enable the real MSR[SPE] in the guest.
+ */
+ case BOOKE_INTERRUPT_SPE_FP_DATA:
+ case BOOKE_INTERRUPT_SPE_FP_ROUND:
+ printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+ __func__, exit_nr, vcpu->arch.pc);
+ run->hw.hardware_exit_reason = exit_nr;
+ r = RESUME_HOST;
+ break;
+#endif
case BOOKE_INTERRUPT_DATA_STORAGE:
kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
@@ -392,6 +488,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_t gpaddr;
gfn_t gfn;
+#ifdef CONFIG_KVM_E500
+ if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
+ kvmppc_map_magic(vcpu);
+ kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+ r = RESUME_GUEST;
+
+ break;
+ }
+#endif
+
/* Check the guest TLB. */
gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
if (gtlb_index < 0) {
@@ -514,6 +621,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.pc = 0;
vcpu->arch.shared->msr = 0;
+ vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
vcpu->arch.shadow_pid = 1;
@@ -770,6 +878,26 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
return -ENOTSUPP;
}
+int kvmppc_core_prepare_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+ return 0;
+}
+
+void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem)
+{
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
int __init kvmppc_booke_init(void)
{
unsigned long ivor[16];
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb7030358..8e1fe33d64e5 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,24 +52,19 @@
extern unsigned long kvmppc_booke_handlers;
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
- if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
- kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
- vcpu->arch.shared->msr = new_msr;
-
- if (vcpu->arch.shared->msr & MSR_WE) {
- kvm_vcpu_block(vcpu);
- kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
- };
-}
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+/* low-level asm code to transfer guest state */
+void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+/* high-level function, manages flags, host state */
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+
#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index b58ccae95904..42f2fb1f66e9 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -13,6 +13,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
@@ -24,8 +25,6 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
-#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
-
#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
/* The host stack layout: */
@@ -192,6 +191,12 @@ _GLOBAL(kvmppc_resume_host)
lwz r3, VCPU_HOST_PID(r4)
mtspr SPRN_PID, r3
+#ifdef CONFIG_FSL_BOOKE
+ /* we cheat and know that Linux doesn't use PID1 which is always 0 */
+ lis r3, 0
+ mtspr SPRN_PID1, r3
+#endif
+
/* Restore host IVPR before re-enabling interrupts. We cheat and know
* that Linux IVPR is always 0xc0000000. */
lis r3, 0xc000
@@ -241,6 +246,14 @@ _GLOBAL(kvmppc_resume_host)
heavyweight_exit:
/* Not returning to guest. */
+#ifdef CONFIG_SPE
+ /* save guest SPEFSCR and load host SPEFSCR */
+ mfspr r9, SPRN_SPEFSCR
+ stw r9, VCPU_SPEFSCR(r4)
+ lwz r9, VCPU_HOST_SPEFSCR(r4)
+ mtspr SPRN_SPEFSCR, r9
+#endif
+
/* We already saved guest volatile register state; now save the
* non-volatiles. */
stw r15, VCPU_GPR(r15)(r4)
@@ -342,6 +355,14 @@ _GLOBAL(__kvmppc_vcpu_run)
lwz r30, VCPU_GPR(r30)(r4)
lwz r31, VCPU_GPR(r31)(r4)
+#ifdef CONFIG_SPE
+ /* save host SPEFSCR and load guest SPEFSCR */
+ mfspr r3, SPRN_SPEFSCR
+ stw r3, VCPU_HOST_SPEFSCR(r4)
+ lwz r3, VCPU_SPEFSCR(r4)
+ mtspr SPRN_SPEFSCR, r3
+#endif
+
lightweight_exit:
stw r2, HOST_R2(r1)
@@ -350,6 +371,11 @@ lightweight_exit:
lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3
+#ifdef CONFIG_FSL_BOOKE
+ lwz r3, VCPU_SHADOW_PID1(r4)
+ mtspr SPRN_PID1, r3
+#endif
+
#ifdef CONFIG_44x
iccci 0, 0 /* XXX hack */
#endif
@@ -405,20 +431,17 @@ lightweight_exit:
/* Finish loading guest volatiles and jump to guest. */
lwz r3, VCPU_CTR(r4)
+ lwz r5, VCPU_CR(r4)
+ lwz r6, VCPU_PC(r4)
+ lwz r7, VCPU_SHADOW_MSR(r4)
mtctr r3
- lwz r3, VCPU_CR(r4)
- mtcr r3
+ mtcr r5
+ mtsrr0 r6
+ mtsrr1 r7
lwz r5, VCPU_GPR(r5)(r4)
lwz r6, VCPU_GPR(r6)(r4)
lwz r7, VCPU_GPR(r7)(r4)
lwz r8, VCPU_GPR(r8)(r4)
- lwz r3, VCPU_PC(r4)
- mtsrr0 r3
- lwz r3, VCPU_SHARED(r4)
- lwz r3, (VCPU_SHARED_MSR + 4)(r3)
- oris r3, r3, KVMPPC_MSR_MASK@h
- ori r3, r3, KVMPPC_MSR_MASK@l
- mtsrr1 r3
/* Clear any debug events which occurred since we disabled MSR[DE].
* XXX This gives us a 3-instruction window in which a breakpoint
@@ -430,3 +453,24 @@ lightweight_exit:
lwz r3, VCPU_GPR(r3)(r4)
lwz r4, VCPU_GPR(r4)(r4)
rfi
+
+#ifdef CONFIG_SPE
+_GLOBAL(kvmppc_save_guest_spe)
+ cmpi 0,r3,0
+ beqlr-
+ SAVE_32EVRS(0, r4, r3, VCPU_EVR)
+ evxor evr6, evr6, evr6
+ evmwumiaa evr6, evr6, evr6
+ li r4,VCPU_ACC
+ evstddx evr6, r4, r3 /* save acc */
+ blr
+
+_GLOBAL(kvmppc_load_guest_spe)
+ cmpi 0,r3,0
+ beqlr-
+ li r4,VCPU_ACC
+ evlddx evr6,r4,r3
+ evmra evr6,evr6 /* load acc */
+ REST_32EVRS(0, r4, r3, VCPU_EVR)
+ blr
+#endif
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 318dbc61ba44..797a7447c268 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
@@ -41,6 +41,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
kvmppc_e500_tlb_put(vcpu);
+
+#ifdef CONFIG_SPE
+ if (vcpu->arch.shadow_msr & MSR_SPE)
+ kvmppc_vcpu_disable_spe(vcpu);
+#endif
}
int kvmppc_core_check_processor_compat(void)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 69cd665a0caf..d48ae396f41e 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -81,8 +81,12 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
kvmppc_set_pid(vcpu, spr_val);
break;
case SPRN_PID1:
+ if (spr_val != 0)
+ return EMULATE_FAIL;
vcpu_e500->pid[1] = spr_val; break;
case SPRN_PID2:
+ if (spr_val != 0)
+ return EMULATE_FAIL;
vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
vcpu_e500->mas0 = spr_val; break;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index b18fe353397d..13c432ea2fa8 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -28,8 +28,196 @@
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
+struct id {
+ unsigned long val;
+ struct id **pentry;
+};
+
+#define NUM_TIDS 256
+
+/*
+ * This table provides mappings from:
+ * (guestAS,guestTID,guestPR) --> ID of physical cpu
+ * guestAS [0..1]
+ * guestTID [0..255]
+ * guestPR [0..1]
+ * ID [1..255]
+ * Each vcpu keeps one vcpu_id_table.
+ */
+struct vcpu_id_table {
+ struct id id[2][NUM_TIDS][2];
+};
+
+/*
+ * This table provides the reverse mapping of vcpu_id_table:
+ * ID --> address of vcpu_id_table item.
+ * Each physical core has one pcpu_id_table.
+ */
+struct pcpu_id_table {
+ struct id *entry[NUM_TIDS];
+};
+
+static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
+
+/* This variable keeps the last used shadow ID on the local core.
+ * The valid range of a shadow ID is [1..255]. */
+static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
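
The two tables above trade memory for lookup speed: each vcpu carries a full (AS, TID, PR)-indexed array of struct id, while each physical core only keeps a 256-slot reverse map from shadow ID back to the owning vcpu entry. A stand-alone sketch of the footprint this implies; the struct layouts are copied from the patch and only the size arithmetic is added:

#include <stdio.h>

#define NUM_TIDS 256

struct id {
	unsigned long val;
	struct id **pentry;
};

struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];	/* [guestAS][guestTID][guestPR] */
};

struct pcpu_id_table {
	struct id *entry[NUM_TIDS];	/* shadow ID -> owning vcpu entry */
};

int main(void)
{
	/* roughly 8 KiB per vcpu and 1 KiB per core on a 32-bit e500 host;
	 * the values printed here reflect the build host's pointer size */
	printf("vcpu_id_table: %zu bytes per vcpu\n",
	       sizeof(struct vcpu_id_table));
	printf("pcpu_id_table: %zu bytes per physical cpu\n",
	       sizeof(struct pcpu_id_table));
	return 0;
}
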
+
static unsigned int tlb1_entry_num;
+/*
+ * Allocate a free shadow ID and set up a valid SID mapping in the given entry.
+ * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_setup_one(struct id *entry)
+{
+ unsigned long sid;
+ int ret = -1;
+
+ sid = ++(__get_cpu_var(pcpu_last_used_sid));
+ if (sid < NUM_TIDS) {
+ __get_cpu_var(pcpu_sids).entry[sid] = entry;
+ entry->val = sid;
+ entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+ ret = sid;
+ }
+
+ /*
+ * If sid == NUM_TIDS, we've run out of sids. We return -1, and
+ * the caller will invalidate everything and start over.
+ *
+ * sid > NUM_TIDS indicates a race, which we disable preemption to
+ * avoid.
+ */
+ WARN_ON(sid > NUM_TIDS);
+
+ return ret;
+}
+
+/*
+ * Check whether the given entry contains a valid shadow ID mapping.
+ * An ID mapping is considered valid only if
+ * both the vcpu and the pcpu know about it.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_lookup(struct id *entry)
+{
+ if (entry && entry->val != 0 &&
+ __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
+ entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+ return entry->val;
+ return -1;
+}
+
+/* Invalidate all id mappings on local core */
+static inline void local_sid_destroy_all(void)
+{
+ preempt_disable();
+ __get_cpu_var(pcpu_last_used_sid) = 0;
+ memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+ preempt_enable();
+}
+
+static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
+ return vcpu_e500->idt;
+}
+
+static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ kfree(vcpu_e500->idt);
+}
+
+/* Invalidate all mappings on vcpu */
+static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/* Invalidate one ID mapping on vcpu */
+static inline void kvmppc_e500_id_table_reset_one(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ int as, int pid, int pr)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+
+ BUG_ON(as >= 2);
+ BUG_ON(pid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ idt->id[as][pid][pr].val = 0;
+ idt->id[as][pid][pr].pentry = NULL;
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/*
+ * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
+ * This function first looks up whether a valid mapping exists;
+ * if not, it creates a new one.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+ unsigned int as, unsigned int gid,
+ unsigned int pr, int avoid_recursion)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ int sid;
+
+ BUG_ON(as >= 2);
+ BUG_ON(gid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ sid = local_sid_lookup(&idt->id[as][gid][pr]);
+
+ while (sid <= 0) {
+ /* No mapping yet */
+ sid = local_sid_setup_one(&idt->id[as][gid][pr]);
+ if (sid <= 0) {
+ _tlbil_all();
+ local_sid_destroy_all();
+ }
+
+ /* Update shadow pid when mappings are changed */
+ if (!avoid_recursion)
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
+
+ return sid;
+}
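
When the per-core counter runs past 255, local_sid_setup_one() fails, the caller flushes the whole core (_tlbil_all() plus local_sid_destroy_all()) and allocation restarts from 1, so the loop above terminates on the second pass at the latest. A minimal user-space model of that allocate-or-flush cycle, illustrative only:

#include <stdio.h>

#define NUM_TIDS 256

static unsigned long last_used_sid;

static int sid_setup_one(void)
{
	unsigned long sid = ++last_used_sid;

	return sid < NUM_TIDS ? (int)sid : -1;
}

static int get_sid(void)
{
	int sid = -1;

	while (sid <= 0) {
		sid = sid_setup_one();
		if (sid <= 0)
			last_used_sid = 0;	/* models _tlbil_all() + local_sid_destroy_all() */
	}
	return sid;
}

int main(void)
{
	int i;

	for (i = 0; i < 300; i++) {
		int sid = get_sid();

		if (i < 2 || i >= 253)
			printf("allocation %3d -> sid %d\n", i, sid);
	}
	return 0;
}
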
+
+/* Map the guest PID to a shadow PID.
+ * We use PID to hold the shadow of the current guest non-zero PID,
+ * and PID1 to hold the shadow of guest PID 0,
+ * so that a guest tlbe with TID=0 can be accessed at any time. */
+void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ preempt_disable();
+ vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu),
+ get_cur_pid(&vcpu_e500->vcpu),
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu), 0,
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ preempt_enable();
+}
+
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -41,25 +229,14 @@ void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
printk("Guest TLB%d:\n", tlbsel);
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
- tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
+ for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
+ tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
if (tlbe->mas1 & MAS1_VALID)
printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
tlbsel, i, tlbe->mas1, tlbe->mas2,
tlbe->mas3, tlbe->mas7);
}
}
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++) {
- printk("Shadow TLB%d:\n", tlbsel);
- for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
- tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
- if (tlbe->mas1 & MAS1_VALID)
- printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
- tlbsel, i, tlbe->mas1, tlbe->mas2,
- tlbe->mas3, tlbe->mas7);
- }
- }
}
static inline unsigned int tlb0_get_next_victim(
@@ -67,16 +244,17 @@ static inline unsigned int tlb0_get_next_victim(
{
unsigned int victim;
- victim = vcpu_e500->guest_tlb_nv[0]++;
- if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
- vcpu_e500->guest_tlb_nv[0] = 0;
+ victim = vcpu_e500->gtlb_nv[0]++;
+ if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
+ vcpu_e500->gtlb_nv[0] = 0;
return victim;
}
static inline unsigned int tlb1_max_shadow_size(void)
{
- return tlb1_entry_num - tlbcam_index;
+ /* reserve one entry for magic page */
+ return tlb1_entry_num - tlbcam_index - 1;
}
static inline int tlbe_is_writable(struct tlbe *tlbe)
@@ -112,72 +290,149 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
/*
* writing shadow tlb entry to host TLB
*/
-static inline void __write_host_tlbe(struct tlbe *stlbe)
+static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mtspr(SPRN_MAS0, mas0);
mtspr(SPRN_MAS1, stlbe->mas1);
mtspr(SPRN_MAS2, stlbe->mas2);
mtspr(SPRN_MAS3, stlbe->mas3);
mtspr(SPRN_MAS7, stlbe->mas7);
- __asm__ __volatile__ ("tlbwe\n" : : );
+ asm volatile("isync; tlbwe" : : : "memory");
+ local_irq_restore(flags);
}
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+ int tlbsel, int esel, struct tlbe *stlbe)
{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
-
- local_irq_disable();
if (tlbsel == 0) {
- __write_host_tlbe(stlbe);
+ __write_host_tlbe(stlbe,
+ MAS0_TLBSEL(0) |
+ MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
} else {
- unsigned register mas0;
-
- mas0 = mfspr(SPRN_MAS0);
-
- mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
- __write_host_tlbe(stlbe);
-
- mtspr(SPRN_MAS0, mas0);
+ __write_host_tlbe(stlbe,
+ MAS0_TLBSEL(1) |
+ MAS0_ESEL(to_htlb1_esel(esel)));
}
- local_irq_enable();
+ trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
+ stlbe->mas3, stlbe->mas7);
+}
+
+void kvmppc_map_magic(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct tlbe magic;
+ ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+ unsigned int stid;
+ pfn_t pfn;
+
+ pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+ get_page(pfn_to_page(pfn));
+
+ preempt_disable();
+ stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
+
+ magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
+ MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+ magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
+ magic.mas3 = (pfn << PAGE_SHIFT) |
+ MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+ magic.mas7 = pfn >> (32 - PAGE_SHIFT);
+
+ __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+ preempt_enable();
}
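
MAS3 only holds the low 32 bits of the real page address, so when the host has more than 4 GiB of memory the upper bits of the frame number land in MAS7, which is how the magic-page mapping above splits them. A quick stand-alone check of that split; PAGE_SHIFT and the sample pfn are assumptions:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t pfn = 0x123456;	/* example frame above the 4 GiB line */
	uint32_t mas3_rpn = (uint32_t)(pfn << PAGE_SHIFT);	   /* low 32 bits of the real address */
	uint32_t mas7_rpn = (uint32_t)(pfn >> (32 - PAGE_SHIFT)); /* bits 32 and up */
	uint64_t raddr = ((uint64_t)mas7_rpn << 32) | mas3_rpn;

	printf("pfn %#llx -> MAS3 RPN %#x, MAS7 %#x, reassembled %#llx\n",
	       (unsigned long long)pfn, mas3_rpn, mas7_rpn,
	       (unsigned long long)raddr);
	return 0;
}
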
void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int i;
- unsigned register mas0;
-
- /* Load all valid TLB1 entries to reduce guest tlb miss fault */
- local_irq_disable();
- mas0 = mfspr(SPRN_MAS0);
- for (i = 0; i < tlb1_max_shadow_size(); i++) {
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
-
- if (get_tlb_v(stlbe)) {
- mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
- | MAS0_ESEL(to_htlb1_esel(i)));
- __write_host_tlbe(stlbe);
- }
- }
- mtspr(SPRN_MAS0, mas0);
- local_irq_enable();
+
+ /* The shadow PID may have expired on the local core */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
- _tlbil_all();
+}
+
+static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int tlbsel, int esel)
+{
+ struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ unsigned int pr, tid, ts, pid;
+ u32 val, eaddr;
+ unsigned long flags;
+
+ ts = get_tlb_ts(gtlbe);
+ tid = get_tlb_tid(gtlbe);
+
+ preempt_disable();
+
+ /* One guest ID may be mapped to two shadow IDs */
+ for (pr = 0; pr < 2; pr++) {
+ /*
+ * The shadow PID can have a valid mapping on at most one
+ * host CPU. In the common case, it will be valid on this
+ * CPU, in which case (for TLB0) we do a local invalidation
+ * of the specific address.
+ *
+ * If the shadow PID is not valid on the current host CPU, or
+ * if we're invalidating a TLB1 entry, we invalidate the
+ * entire shadow PID.
+ */
+ if (tlbsel == 1 ||
+ (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
+ kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
+ continue;
+ }
+
+ /*
+ * The guest is invalidating a TLB0 entry which is in a PID
+ * that has a valid shadow mapping on this host CPU. We
+ * search host TLB0 to invalidate its shadow TLB entry,
+ * similar to __tlbil_va except that we need to look in AS1.
+ */
+ val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
+ eaddr = get_tlb_eaddr(gtlbe);
+
+ local_irq_save(flags);
+
+ mtspr(SPRN_MAS6, val);
+ asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
+ val = mfspr(SPRN_MAS1);
+ if (val & MAS1_VALID) {
+ mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ asm volatile("tlbwe");
+ }
+
+ local_irq_restore(flags);
+ }
+
+ preempt_enable();
}
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
+ int size = vcpu_e500->gtlb_size[tlbsel];
+ int set_base;
int i;
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
- struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
+ if (tlbsel == 0) {
+ int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
+ set_base = (eaddr >> PAGE_SHIFT) & mask;
+ set_base *= KVM_E500_TLB0_WAY_NUM;
+ size = KVM_E500_TLB0_WAY_NUM;
+ } else {
+ set_base = 0;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
unsigned int tid;
if (eaddr < get_tlb_eaddr(tlbe))
@@ -196,66 +451,32 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
if (get_tlb_ts(tlbe) != as && as != -1)
continue;
- return i;
+ return set_base + i;
}
return -1;
}
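
With this change a TLB0 lookup no longer scans the whole guest array: the low bits of the effective page number select one set and only the ways of that set are compared against the PID/AS/EPN. A worked example of the index arithmetic; the table size and way count below are example values rather than the kernel's actual configuration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int size = 256;	/* example gtlb_size[0] */
	unsigned int ways = 2;		/* example KVM_E500_TLB0_WAY_NUM */
	uint32_t eaddr = 0x10345000;	/* example guest effective address */

	unsigned int mask = size / ways - 1;		/* 127 */
	unsigned int set = (eaddr >> PAGE_SHIFT) & mask;
	unsigned int set_base = set * ways;

	printf("eaddr %#x -> set %u, only entries [%u..%u] are searched\n",
	       eaddr, set, set_base, set_base + ways - 1);
	return 0;
}
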
-static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
-{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
- struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];
-
- if (page) {
- vcpu_e500->shadow_pages[tlbsel][esel] = NULL;
-
- if (get_tlb_v(stlbe)) {
- if (tlbe_is_writable(stlbe))
- kvm_release_page_dirty(page);
- else
- kvm_release_page_clean(page);
- }
- }
-}
-
-static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
+ struct tlbe *gtlbe,
+ pfn_t pfn)
{
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
+ priv->pfn = pfn;
+ priv->flags = E500_TLB_VALID;
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
- stlbe->mas1 = 0;
- trace_kvm_stlb_inval(index_of(tlbsel, esel));
+ if (tlbe_is_writable(gtlbe))
+ priv->flags |= E500_TLB_DIRTY;
}
-static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- gva_t eaddr, gva_t eend, u32 tid)
+static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
{
- unsigned int pid = tid & 0xff;
- unsigned int i;
-
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
- struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
- unsigned int tid;
-
- if (!get_tlb_v(stlbe))
- continue;
-
- if (eend < get_tlb_eaddr(stlbe))
- continue;
+ if (priv->flags & E500_TLB_VALID) {
+ if (priv->flags & E500_TLB_DIRTY)
+ kvm_release_pfn_dirty(priv->pfn);
+ else
+ kvm_release_pfn_clean(priv->pfn);
- if (eaddr > get_tlb_end(stlbe))
- continue;
-
- tid = get_tlb_tid(stlbe);
- if (tid && (tid != pid))
- continue;
-
- kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
- write_host_tlbe(vcpu_e500, 1, i);
+ priv->flags = 0;
}
}
@@ -273,7 +494,7 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
| MAS1_TID(vcpu_e500->pid[pidsel])
| MAS1_TSIZE(tsized);
@@ -286,56 +507,154 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
vcpu_e500->mas7 = 0;
}
-static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
+static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct tlbe *gtlbe, int tsize,
+ struct tlbe_priv *priv,
+ u64 gvaddr, struct tlbe *stlbe)
{
- struct page *new_page;
- struct tlbe *stlbe;
- hpa_t hpaddr;
-
- stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
-
- /* Get reference to new page. */
- new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
- if (is_error_page(new_page)) {
- printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
- (long)gfn);
- kvm_release_page_clean(new_page);
- return;
- }
- hpaddr = page_to_phys(new_page);
-
- /* Drop reference to old page. */
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
+ pfn_t pfn = priv->pfn;
+ unsigned int stid;
- vcpu_e500->shadow_pages[tlbsel][esel] = new_page;
+ stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
+ get_tlb_tid(gtlbe),
+ get_cur_pr(&vcpu_e500->vcpu), 0);
- /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
- | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
+ /* Force TS=1 IPROT=0 for all guest mappings. */
+ stlbe->mas1 = MAS1_TSIZE(tsize)
+ | MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN)
| e500_shadow_mas2_attrib(gtlbe->mas2,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas3 = (hpaddr & MAS3_RPN)
+ stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
| e500_shadow_mas3_attrib(gtlbe->mas3,
vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;
+ stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
+}
- trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
- stlbe->mas3, stlbe->mas7);
+
+static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
+ struct tlbe *stlbe)
+{
+ struct kvm_memory_slot *slot;
+ unsigned long pfn, hva;
+ int pfnmap = 0;
+ int tsize = BOOK3E_PAGESZ_4K;
+ struct tlbe_priv *priv;
+
+ /*
+ * Translate guest physical to true physical, acquiring
+ * a page reference if it is normal, non-reserved memory.
+ *
+ * gfn_to_memslot() must succeed because otherwise we wouldn't
+ * have gotten this far. Eventually we should just pass the slot
+ * pointer through from the first lookup.
+ */
+ slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
+ hva = gfn_to_hva_memslot(slot, gfn);
+
+ if (tlbsel == 1) {
+ struct vm_area_struct *vma;
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma(current->mm, hva);
+ if (vma && hva >= vma->vm_start &&
+ (vma->vm_flags & VM_PFNMAP)) {
+ /*
+ * This VMA is a physically contiguous region (e.g.
+ * /dev/mem) that bypasses normal Linux page
+ * management. Find the overlap between the
+ * vma and the memslot.
+ */
+
+ unsigned long start, end;
+ unsigned long slot_start, slot_end;
+
+ pfnmap = 1;
+
+ start = vma->vm_pgoff;
+ end = start +
+ ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+
+ pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
+
+ slot_start = pfn - (gfn - slot->base_gfn);
+ slot_end = slot_start + slot->npages;
+
+ if (start < slot_start)
+ start = slot_start;
+ if (end > slot_end)
+ end = slot_end;
+
+ tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
+ MAS1_TSIZE_SHIFT;
+
+ /*
+ * e500 doesn't implement the lowest tsize bit,
+ * or 1K pages.
+ */
+ tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
+
+ /*
+ * Now find the largest tsize (up to what the guest
+ * requested) that will cover gfn, stay within the
+ * range, and for which gfn and pfn are mutually
+ * aligned.
+ */
+
+ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
+ unsigned long gfn_start, gfn_end, tsize_pages;
+ tsize_pages = 1 << (tsize - 2);
+
+ gfn_start = gfn & ~(tsize_pages - 1);
+ gfn_end = gfn_start + tsize_pages;
+
+ if (gfn_start + pfn - gfn < start)
+ continue;
+ if (gfn_end + pfn - gfn > end)
+ continue;
+ if ((gfn & (tsize_pages - 1)) !=
+ (pfn & (tsize_pages - 1)))
+ continue;
+
+ gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
+ pfn &= ~(tsize_pages - 1);
+ break;
+ }
+ }
+
+ up_read(&current->mm->mmap_sem);
+ }
+
+ if (likely(!pfnmap)) {
+ pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
+ if (is_error_pfn(pfn)) {
+ printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
+ (long)gfn);
+ kvm_release_pfn_clean(pfn);
+ return;
+ }
+ }
+
+ /* Drop old priv and setup new one. */
+ priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
+ kvmppc_e500_priv_release(priv);
+ kvmppc_e500_priv_setup(priv, gtlbe, pfn);
+
+ kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
}
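
The pfnmap path above tries to back the guest TLB1 entry with the largest host page that still works: each iteration drops the candidate size by a factor of four until the region covers gfn, stays inside the memslot/VMA overlap, and gfn and pfn share the same alignment. A stand-alone walk-through of that loop; every constant below is an assumption chosen for illustration:

#include <stdio.h>

#define BOOK3E_PAGESZ_4K 2

int main(void)
{
	unsigned long gfn = 0x40100, pfn = 0x80100;	/* mutually aligned to 256 pages */
	unsigned long start = 0x80000, end = 0x90000;	/* host pfn range of the overlap */
	int tsize = 10;					/* guest asked for a large page */

	for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
		unsigned long tsize_pages = 1UL << (tsize - 2);	/* 4K pages covered */
		unsigned long gfn_start = gfn & ~(tsize_pages - 1);
		unsigned long gfn_end = gfn_start + tsize_pages;

		if (gfn_start + pfn - gfn < start)
			continue;
		if (gfn_end + pfn - gfn > end)
			continue;
		if ((gfn & (tsize_pages - 1)) != (pfn & (tsize_pages - 1)))
			continue;
		break;
	}
	printf("chosen tsize %d covers %lu x 4K pages\n",
	       tsize, 1UL << (tsize - 2));
	return 0;
}
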
/* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+ int esel, struct tlbe *stlbe)
{
struct tlbe *gtlbe;
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[0][esel];
kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
- gtlbe, tlbsel, esel);
+ gtlbe, 0, esel, stlbe);
return esel;
}
@@ -344,53 +663,37 @@ static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
* the shadow TLB. */
/* XXX for both one-one and one-to-many , for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
+ u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
unsigned int victim;
- victim = vcpu_e500->guest_tlb_nv[1]++;
+ victim = vcpu_e500->gtlb_nv[1]++;
- if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
- vcpu_e500->guest_tlb_nv[1] = 0;
+ if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
+ vcpu_e500->gtlb_nv[1] = 0;
- kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);
+ kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);
return victim;
}
-/* Invalidate all guest kernel mappings when enter usermode,
- * so that when they fault back in they will get the
- * proper permission bits. */
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
- if (usermode) {
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int i;
-
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i < tlb1_max_shadow_size(); i++)
- kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- _tlbil_all();
- }
+ /* Recalculate the shadow PID since the MSR has changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
-static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
+static inline int kvmppc_e500_gtlbe_invalidate(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ int tlbsel, int esel)
{
- struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
if (unlikely(get_tlb_iprot(gtlbe)))
return -1;
- if (tlbsel == 1) {
- kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
- get_tlb_end(gtlbe),
- get_tlb_tid(gtlbe));
- } else {
- kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
- }
-
gtlbe->mas1 = 0;
return 0;
@@ -401,13 +704,14 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
int esel;
if (value & MMUCSR0_TLB0FI)
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
if (value & MMUCSR0_TLB1FI)
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
- _tlbil_all();
+ /* Invalidate all vcpu id mappings */
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
return EMULATE_DONE;
}
@@ -428,7 +732,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
if (ia) {
/* invalidate all entries */
- for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
+ for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
} else {
ea &= 0xfffff000;
@@ -438,7 +742,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
}
- _tlbil_all();
+ /* Invalidate all vcpu id mappings */
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
return EMULATE_DONE;
}
@@ -452,9 +757,9 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
tlbsel = get_tlb_tlbsel(vcpu_e500);
esel = get_tlb_esel(vcpu_e500, tlbsel);
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
vcpu_e500->mas0 &= ~MAS0_NV(~0);
- vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = gtlbe->mas1;
vcpu_e500->mas2 = gtlbe->mas2;
vcpu_e500->mas3 = gtlbe->mas3;
@@ -477,14 +782,14 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
if (esel >= 0) {
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
break;
}
}
if (gtlbe) {
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = gtlbe->mas1;
vcpu_e500->mas2 = gtlbe->mas2;
vcpu_e500->mas3 = gtlbe->mas3;
@@ -497,7 +802,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
- | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
+ | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
@@ -514,23 +819,16 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- u64 eaddr;
- u64 raddr;
- u32 tid;
struct tlbe *gtlbe;
- int tlbsel, esel, stlbsel, sesel;
+ int tlbsel, esel;
tlbsel = get_tlb_tlbsel(vcpu_e500);
esel = get_tlb_esel(vcpu_e500, tlbsel);
- gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
- if (get_tlb_v(gtlbe) && tlbsel == 1) {
- eaddr = get_tlb_eaddr(gtlbe);
- tid = get_tlb_tid(gtlbe);
- kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
- get_tlb_end(gtlbe), tid);
- }
+ if (get_tlb_v(gtlbe))
+ kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
gtlbe->mas1 = vcpu_e500->mas1;
gtlbe->mas2 = vcpu_e500->mas2;
@@ -542,6 +840,12 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
+ struct tlbe stlbe;
+ int stlbsel, sesel;
+ u64 eaddr;
+ u64 raddr;
+
+ preempt_disable();
switch (tlbsel) {
case 0:
/* TLB0 */
@@ -549,7 +853,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
stlbsel = 0;
- sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);
+ sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
break;
@@ -564,13 +868,14 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
* are mapped on the fly. */
stlbsel = 1;
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
- raddr >> PAGE_SHIFT, gtlbe);
+ raddr >> PAGE_SHIFT, gtlbe, &stlbe);
break;
default:
BUG();
}
- write_host_tlbe(vcpu_e500, stlbsel, sesel);
+ write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
+ preempt_enable();
}
kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -610,7 +915,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
struct tlbe *gtlbe =
- &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
+ &vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
u64 pgmask = get_tlb_bytes(gtlbe) - 1;
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
@@ -618,38 +923,37 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int tlbsel, i;
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++)
- for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
- kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);
-
- /* discard all guest mapping */
- _tlbil_all();
}
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
unsigned int index)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ struct tlbe_priv *priv;
+ struct tlbe *gtlbe, stlbe;
int tlbsel = tlbsel_of(index);
int esel = esel_of(index);
int stlbsel, sesel;
+ gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+
+ preempt_disable();
switch (tlbsel) {
case 0:
stlbsel = 0;
sesel = esel;
+ priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];
+
+ kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
+ priv, eaddr, &stlbe);
break;
case 1: {
gfn_t gfn = gpaddr >> PAGE_SHIFT;
- struct tlbe *gtlbe
- = &vcpu_e500->guest_tlb[tlbsel][esel];
stlbsel = 1;
- sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
+ sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
+ gtlbe, &stlbe);
break;
}
@@ -657,7 +961,9 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
BUG();
break;
}
- write_host_tlbe(vcpu_e500, stlbsel, sesel);
+
+ write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
+ preempt_enable();
}
int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
@@ -679,8 +985,10 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
- vcpu->arch.pid = pid;
+ if (vcpu->arch.pid != pid) {
+ vcpu_e500->pid[0] = vcpu->arch.pid = pid;
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
}
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -688,14 +996,14 @@ void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
struct tlbe *tlbe;
/* Insert large initial mapping for guest. */
- tlbe = &vcpu_e500->guest_tlb[1][0];
+ tlbe = &vcpu_e500->gtlb_arch[1][0];
tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
tlbe->mas2 = 0;
tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
tlbe->mas7 = 0;
/* 4K map for serial output. Used by kernel wrapper. */
- tlbe = &vcpu_e500->guest_tlb[1][1];
+ tlbe = &vcpu_e500->gtlb_arch[1][1];
tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
@@ -706,68 +1014,64 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;
- vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
- vcpu_e500->guest_tlb[0] =
+ vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
+ vcpu_e500->gtlb_arch[0] =
kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->guest_tlb[0] == NULL)
+ if (vcpu_e500->gtlb_arch[0] == NULL)
goto err_out;
- vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
- vcpu_e500->shadow_tlb[0] =
- kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->shadow_tlb[0] == NULL)
- goto err_out_guest0;
-
- vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
- vcpu_e500->guest_tlb[1] =
+ vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
+ vcpu_e500->gtlb_arch[1] =
kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
- if (vcpu_e500->guest_tlb[1] == NULL)
- goto err_out_shadow0;
+ if (vcpu_e500->gtlb_arch[1] == NULL)
+ goto err_out_guest0;
- vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
- vcpu_e500->shadow_tlb[1] =
- kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
- if (vcpu_e500->shadow_tlb[1] == NULL)
+ vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
+ kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
+ if (vcpu_e500->gtlb_priv[0] == NULL)
goto err_out_guest1;
+ vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
+ kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
- vcpu_e500->shadow_pages[0] = (struct page **)
- kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
- if (vcpu_e500->shadow_pages[0] == NULL)
- goto err_out_shadow1;
+ if (vcpu_e500->gtlb_priv[1] == NULL)
+ goto err_out_priv0;
- vcpu_e500->shadow_pages[1] = (struct page **)
- kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
- if (vcpu_e500->shadow_pages[1] == NULL)
- goto err_out_page0;
+ if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+ goto err_out_priv1;
/* Init TLB configuration register */
vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
- vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
+ vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
- vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];
+ vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];
return 0;
-err_out_page0:
- kfree(vcpu_e500->shadow_pages[0]);
-err_out_shadow1:
- kfree(vcpu_e500->shadow_tlb[1]);
+err_out_priv1:
+ kfree(vcpu_e500->gtlb_priv[1]);
+err_out_priv0:
+ kfree(vcpu_e500->gtlb_priv[0]);
err_out_guest1:
- kfree(vcpu_e500->guest_tlb[1]);
-err_out_shadow0:
- kfree(vcpu_e500->shadow_tlb[0]);
+ kfree(vcpu_e500->gtlb_arch[1]);
err_out_guest0:
- kfree(vcpu_e500->guest_tlb[0]);
+ kfree(vcpu_e500->gtlb_arch[0]);
err_out:
return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- kfree(vcpu_e500->shadow_pages[1]);
- kfree(vcpu_e500->shadow_pages[0]);
- kfree(vcpu_e500->shadow_tlb[1]);
- kfree(vcpu_e500->guest_tlb[1]);
- kfree(vcpu_e500->shadow_tlb[0]);
- kfree(vcpu_e500->guest_tlb[0]);
+ int stlbsel, i;
+
+ /* release all privs */
+ for (stlbsel = 0; stlbsel < 2; stlbsel++)
+ for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
+ struct tlbe_priv *priv =
+ &vcpu_e500->gtlb_priv[stlbsel][i];
+ kvmppc_e500_priv_release(priv);
+ }
+
+ kvmppc_e500_id_table_free(vcpu_e500);
+ kfree(vcpu_e500->gtlb_arch[1]);
+ kfree(vcpu_e500->gtlb_arch[0]);
}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 458946b4775d..59b88e99a235 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, yu.liu@freescale.com
*
@@ -55,6 +55,7 @@ extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
+extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
/* TLB helper functions */
static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
@@ -110,6 +111,16 @@ static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
return vcpu->arch.pid & 0xff;
}
+static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
+}
+
+static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & MSR_PR);
+}
+
static inline unsigned int get_cur_spid(
const struct kvmppc_vcpu_e500 *vcpu_e500)
{
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 616dd516ca1f..a107c9be0fb1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -30,6 +30,7 @@
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
+#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"
@@ -38,8 +39,12 @@
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
+#ifndef CONFIG_KVM_BOOK3S_64_HV
return !(v->arch.shared->msr & MSR_WE) ||
!!(v->arch.pending_exceptions);
+#else
+ return !(v->arch.ceded) || !!(v->arch.pending_exceptions);
+#endif
}
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -73,7 +78,8 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
}
case HC_VENDOR_KVM | KVM_HC_FEATURES:
r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
+#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
+ /* XXX Missing magic page on 44x */
r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
@@ -147,7 +153,7 @@ void kvm_arch_check_processor_compat(void *rtn)
int kvm_arch_init_vm(struct kvm *kvm)
{
- return 0;
+ return kvmppc_core_init_vm(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
@@ -163,6 +169,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm->vcpus[i] = NULL;
atomic_set(&kvm->online_vcpus, 0);
+
+ kvmppc_core_destroy_vm(kvm);
+
mutex_unlock(&kvm->lock);
}
@@ -180,10 +189,13 @@ int kvm_dev_ioctl_check_extension(long ext)
#else
case KVM_CAP_PPC_SEGSTATE:
#endif
- case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_UNSET_IRQ:
case KVM_CAP_PPC_IRQ_LEVEL:
case KVM_CAP_ENABLE_CAP:
+ r = 1;
+ break;
+#ifndef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
r = 1;
@@ -191,6 +203,21 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
+#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CAP_SPAPR_TCE:
+ r = 1;
+ break;
+ case KVM_CAP_PPC_SMT:
+ r = threads_per_core;
+ break;
+ case KVM_CAP_PPC_RMA:
+ r = 1;
+ /* PPC970 requires an RMA */
+ if (cpu_has_feature(CPU_FTR_ARCH_201))
+ r = 2;
+ break;
+#endif
default:
r = 0;
break;
@@ -211,7 +238,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- return 0;
+ return kvmppc_core_prepare_memory_region(kvm, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
@@ -219,7 +246,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot old,
int user_alloc)
{
- return;
+ kvmppc_core_commit_memory_region(kvm, mem);
}
@@ -287,6 +314,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
+ vcpu->arch.dec_expires = ~(u64)0;
#ifdef CONFIG_KVM_EXIT_TIMING
mutex_init(&vcpu->arch.exit_timing_lock);
@@ -313,6 +341,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
kvmppc_core_vcpu_load(vcpu, cpu);
+ vcpu->cpu = smp_processor_id();
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -321,6 +350,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
#ifdef CONFIG_BOOKE
vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
+ vcpu->cpu = -1;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -492,15 +522,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
for (i = 0; i < 32; i++)
kvmppc_set_gpr(vcpu, i, gprs[i]);
vcpu->arch.osi_needed = 0;
+ } else if (vcpu->arch.hcall_needed) {
+ int i;
+
+ kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
+ for (i = 0; i < 9; ++i)
+ kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
+ vcpu->arch.hcall_needed = 0;
}
kvmppc_core_deliver_interrupts(vcpu);
- local_irq_disable();
- kvm_guest_enter();
- r = __kvmppc_vcpu_run(run, vcpu);
- kvm_guest_exit();
- local_irq_enable();
+ r = kvmppc_vcpu_run(run, vcpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -518,6 +551,8 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
vcpu->stat.halt_wakeup++;
+ } else if (vcpu->cpu != -1) {
+ smp_send_reschedule(vcpu->cpu);
}
return 0;
@@ -633,6 +668,29 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ case KVM_CREATE_SPAPR_TCE: {
+ struct kvm_create_spapr_tce create_tce;
+ struct kvm *kvm = filp->private_data;
+
+ r = -EFAULT;
+ if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
+ goto out;
+ r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
+ goto out;
+ }
+
+ case KVM_ALLOCATE_RMA: {
+ struct kvm *kvm = filp->private_data;
+ struct kvm_allocate_rma rma;
+
+ r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+ if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+ r = -EFAULT;
+ break;
+ }
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
default:
r = -ENOTTY;
}
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 319177df9587..07b6110a4bb7 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -56,15 +56,6 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
u64 old;
- do_div(duration, tb_ticks_per_usec);
- if (unlikely(duration > 0xFFFFFFFF)) {
- printk(KERN_ERR"%s - duration too big -> overflow"
- " duration %lld type %d exit #%d\n",
- __func__, duration, type,
- vcpu->arch.timing_count_type[type]);
- return;
- }
-
mutex_lock(&vcpu->arch.exit_timing_lock);
vcpu->arch.timing_count_type[type]++;
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 3aca1b042b8c..b135d3d397db 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -103,7 +103,7 @@ TRACE_EVENT(kvm_gtlb_write,
* Book3S trace points *
*************************************************************************/
-#ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_KVM_BOOK3S_PR
TRACE_EVENT(kvm_book3s_exit,
TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
@@ -252,7 +252,7 @@ TRACE_EVENT(kvm_book3s_mmu_flush,
),
TP_fast_assign(
- __entry->count = vcpu->arch.hpte_cache_count;
+ __entry->count = to_book3s(vcpu)->hpte_cache_count;
__entry->p1 = p1;
__entry->p2 = p2;
__entry->type = type;
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index dfd764896db0..90039bc64119 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
#define HPTE_LOCK_BIT 3
-static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
+DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
@@ -51,7 +51,7 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
va &= ~0xffful;
va |= ssize << 8;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
- : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
+ : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
default:
@@ -61,7 +61,7 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
va |= ssize << 8;
va |= 1; /* L */
asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
- : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206)
+ : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
}
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index 29c02f36b32f..f519ee17ff7d 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -167,7 +167,7 @@ BEGIN_FTR_SECTION
std r12,PACA_EXGEN+EX_R13(r13)
EXCEPTION_PROLOG_ISERIES_1
FTR_SECTION_ELSE
- EXCEPTION_PROLOG_1(PACA_EXGEN)
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
EXCEPTION_PROLOG_ISERIES_1
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
b data_access_common
diff --git a/arch/powerpc/platforms/iseries/exception.h b/arch/powerpc/platforms/iseries/exception.h
index bae3fba5ad8e..50271b550a99 100644
--- a/arch/powerpc/platforms/iseries/exception.h
+++ b/arch/powerpc/platforms/iseries/exception.h
@@ -39,7 +39,7 @@
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_1(area); \
+ EXCEPTION_PROLOG_1(area, NOTEST, 0); \
EXCEPTION_PROLOG_ISERIES_1; \
b label##_common
@@ -48,7 +48,7 @@ label##_iSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
- EXCEPTION_PROLOG_1(PACA_EXGEN); \
+ EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
lbz r10,PACASOFTIRQEN(r13); \
cmpwi 0,r10,0; \
beq- label##_iSeries_masked; \
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 1f15ad436140..ba382b59b926 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
+#include <linux/module.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -24,6 +25,7 @@
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
+#include <asm/kvm_ppc.h>
struct icp_ipl {
union {
@@ -139,6 +141,12 @@ static void icp_native_cause_ipi(int cpu, unsigned long data)
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
+void xics_wake_cpu(int cpu)
+{
+ icp_native_set_qirr(cpu, IPI_PRIORITY);
+}
+EXPORT_SYMBOL_GPL(xics_wake_cpu);
+
static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
@@ -185,6 +193,7 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
}
icp_native_regs[cpu] = ioremap(addr, size);
+ kvmppc_set_xics_phys(cpu, addr);
if (!icp_native_regs[cpu]) {
pr_warning("icp_native: Failed ioremap for CPU %d, "
"interrupt server #0x%x, addr %#lx\n",
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
index 2a5523a32bcc..e8c9e18b8039 100644
--- a/arch/s390/boot/compressed/head31.S
+++ b/arch/s390/boot/compressed/head31.S
@@ -7,14 +7,14 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include "sizes.h"
__HEAD
- .globl startup_continue
-startup_continue:
+ENTRY(startup_continue)
basr %r13,0 # get base
.LPG1:
# setup stack
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S
index 2982cb140550..f86a4eef28a9 100644
--- a/arch/s390/boot/compressed/head64.S
+++ b/arch/s390/boot/compressed/head64.S
@@ -7,14 +7,14 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include "sizes.h"
__HEAD
- .globl startup_continue
-startup_continue:
+ENTRY(startup_continue)
basr %r13,0 # get base
.LPG1:
# setup stack
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 5ed8d64fc2ed..0317a3547cb9 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -1,15 +1,12 @@
/*
* Cryptographic API.
*
- * s390 implementation of the SHA256 Secure Hash Algorithm.
+ * s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
*
* s390 Version:
- * Copyright IBM Corp. 2005,2007
+ * Copyright IBM Corp. 2005,2011
* Author(s): Jan Glauber (jang@de.ibm.com)
*
- * Derived from "crypto/sha256_generic.c"
- * and "arch/s390/crypto/sha1_s390.c"
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -65,7 +62,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
return 0;
}
-static struct shash_alg alg = {
+static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init,
.update = s390_sha_update,
@@ -84,22 +81,69 @@ static struct shash_alg alg = {
}
};
-static int sha256_s390_init(void)
+static int sha224_init(struct shash_desc *desc)
{
+ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+ sctx->func = KIMD_SHA_256;
+
+ return 0;
+}
+
+static struct shash_alg sha224_alg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .init = sha224_init,
+ .update = s390_sha_update,
+ .final = s390_sha_final,
+ .export = sha256_export,
+ .import = sha256_import,
+ .descsize = sizeof(struct s390_sha_ctx),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name= "sha224-s390",
+ .cra_priority = CRYPT_S390_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sha256_s390_init(void)
+{
+ int ret;
+
if (!crypt_s390_func_available(KIMD_SHA_256, CRYPT_S390_MSA))
return -EOPNOTSUPP;
-
- return crypto_register_shash(&alg);
+ ret = crypto_register_shash(&sha256_alg);
+ if (ret < 0)
+ goto out;
+ ret = crypto_register_shash(&sha224_alg);
+ if (ret < 0)
+ crypto_unregister_shash(&sha256_alg);
+out:
+ return ret;
}
static void __exit sha256_s390_fini(void)
{
- crypto_unregister_shash(&alg);
+ crypto_unregister_shash(&sha224_alg);
+ crypto_unregister_shash(&sha256_alg);
}
module_init(sha256_s390_init);
module_exit(sha256_s390_fini);
MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha224");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
+MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
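
Once the module has registered "sha224", user space can exercise it through the AF_ALG socket interface like any other shash. A small smoke test, assuming a kernel with AF_ALG enabled; this harness is not part of the patch. For the input "abc" the printed digest should be the standard SHA-224 test vector starting with 23097d22:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha224",
	};
	unsigned char digest[28];	/* SHA-224 digests are 28 bytes */
	const char msg[] = "abc";
	int tfm, op, i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;
	op = accept(tfm, NULL, 0);
	if (op < 0)
		return 1;
	if (write(op, msg, strlen(msg)) < 0 || read(op, digest, sizeof(digest)) < 0)
		return 1;
	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}
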
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 865d6d891ace..38fdf451febb 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -29,42 +29,42 @@
})
/* set system mask. */
-static inline void __arch_local_irq_ssm(unsigned long flags)
+static inline notrace void __arch_local_irq_ssm(unsigned long flags)
{
asm volatile("ssm %0" : : "Q" (flags) : "memory");
}
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
{
return __arch_local_irq_stosm(0x00);
}
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
{
return __arch_local_irq_stnsm(0xfc);
}
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
{
arch_local_irq_save();
}
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
{
__arch_local_irq_stosm(0x03);
}
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
{
__arch_local_irq_ssm(flags);
}
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & (3UL << (BITS_PER_LONG - 8)));
}
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
{
return arch_irqs_disabled_flags(arch_local_save_flags());
}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index cef7dbf69dfc..00ff00dfb24c 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -93,9 +93,7 @@ struct kvm_s390_sie_block {
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
__u32 todpr; /* 0x006c */
- __u8 reserved70[16]; /* 0x0070 */
- __u64 gmsor; /* 0x0080 */
- __u64 gmslm; /* 0x0088 */
+ __u8 reserved70[32]; /* 0x0070 */
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
@@ -138,6 +136,7 @@ struct kvm_vcpu_stat {
u32 instruction_chsc;
u32 instruction_stsi;
u32 instruction_stfl;
+ u32 instruction_tprot;
u32 instruction_sigp_sense;
u32 instruction_sigp_emergency;
u32 instruction_sigp_stop;
@@ -175,6 +174,10 @@ struct kvm_s390_prefix_info {
__u32 address;
};
+struct kvm_s390_emerg_info {
+ __u16 code;
+};
+
struct kvm_s390_interrupt_info {
struct list_head list;
u64 type;
@@ -182,6 +185,7 @@ struct kvm_s390_interrupt_info {
struct kvm_s390_io_info io;
struct kvm_s390_ext_info ext;
struct kvm_s390_pgm_info pgm;
+ struct kvm_s390_emerg_info emerg;
struct kvm_s390_prefix_info prefix;
};
};
@@ -226,6 +230,7 @@ struct kvm_vcpu_arch {
struct cpuid cpu_id;
u64 stidp_data;
};
+ struct gmap *gmap;
};
struct kvm_vm_stat {
@@ -236,6 +241,7 @@ struct kvm_arch{
struct sca_block *sca;
debug_info_t *dbf;
struct kvm_s390_float_interrupt float_int;
+ struct gmap *gmap;
};
extern int sie64a(struct kvm_s390_sie_block *, unsigned long *);
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index 291c2d01c44f..fc8a8284778e 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -1,6 +1,9 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
-/* Nothing to see here... */
+#include <linux/stringify.h>
+
+#define __ALIGN .align 4, 0x07
+#define __ALIGN_STR __stringify(__ALIGN)
#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 228cf0b295db..f26280d9e88d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -268,7 +268,7 @@ struct _lowcore {
__u64 vdso_per_cpu_data; /* 0x0358 */
__u64 machine_flags; /* 0x0360 */
__u64 ftrace_func; /* 0x0368 */
- __u64 sie_hook; /* 0x0370 */
+ __u64 gmap; /* 0x0370 */
__u64 cmf_hpp; /* 0x0378 */
/* Interrupt response block. */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 82d0847896a0..4506791adcd5 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
unsigned int flush_mm;
spinlock_t list_lock;
struct list_head pgtable_list;
+ struct list_head gmap_list;
unsigned long asce_bits;
unsigned long asce_limit;
unsigned long vdso_base;
@@ -17,6 +18,7 @@ typedef struct {
#define INIT_MM_CONTEXT(name) \
.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
- .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
+ .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+ .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
#endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 38e71ebcd3c2..8eef9b5b3cf4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -20,7 +20,7 @@
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
-unsigned long *page_table_alloc(struct mm_struct *);
+unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
void page_table_free(struct mm_struct *, unsigned long *);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
void page_table_free_rcu(struct mmu_gather *, unsigned long *);
@@ -115,6 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
+ INIT_LIST_HEAD(&mm->context.gmap_list);
return (pgd_t *) crst_table_alloc(mm);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
@@ -133,8 +134,8 @@ static inline void pmd_populate(struct mm_struct *mm,
/*
* page table entry allocation/free routines.
*/
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 801fbe1d837d..519eb5f187ef 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -654,6 +654,48 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
#endif
}
+/**
+ * struct gmap - guest address space
+ * @mm: pointer to the parent mm_struct
+ * @table: pointer to the page directory
+ * @crst_list: list of all crst tables used in the guest address space
+ */
+struct gmap {
+ struct list_head list;
+ struct mm_struct *mm;
+ unsigned long *table;
+ struct list_head crst_list;
+};
+
+/**
+ * struct gmap_rmap - reverse mapping for segment table entries
+ * @next: pointer to the next gmap_rmap structure in the list
+ * @entry: pointer to a segment table entry
+ */
+struct gmap_rmap {
+ struct list_head list;
+ unsigned long *entry;
+};
+
+/**
+ * struct gmap_pgtable - gmap information attached to a page table
+ * @vmaddr: address of the 1MB segment in the process virtual memory
+ * @mapper: list of segment table entries mapping a page table
+ */
+struct gmap_pgtable {
+ unsigned long vmaddr;
+ struct list_head mapper;
+};
+
+struct gmap *gmap_alloc(struct mm_struct *mm);
+void gmap_free(struct gmap *gmap);
+void gmap_enable(struct gmap *gmap);
+void gmap_disable(struct gmap *gmap);
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+ unsigned long to, unsigned long length);
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long gmap_fault(unsigned long address, struct gmap *);
+
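
The declarations above are the whole guest-address-space API: one gmap per VM, a host range backing each guest segment, enable/disable around guest execution, and on-demand fault resolution. A schematic sketch of how a hypervisor would be expected to drive it; this fragment is illustrative, not code from the patch, and it only builds inside the kernel tree:

#include <asm/pgtable.h>

static struct gmap *guest_space_setup(struct mm_struct *mm,
				      unsigned long host_origin,
				      unsigned long guest_size)
{
	struct gmap *gmap;

	gmap = gmap_alloc(mm);		/* one guest address space per VM */
	if (!gmap)
		return NULL;

	/* back guest real addresses [0, guest_size) with host memory */
	if (gmap_map_segment(gmap, host_origin, 0, guest_size)) {
		gmap_free(gmap);
		return NULL;
	}
	return gmap;
}

static void guest_space_run_once(struct gmap *gmap, unsigned long guest_addr)
{
	gmap_enable(gmap);		/* switch this cpu onto the guest tables */
	/* ... enter SIE; if the guest faults on guest_addr ... */
	gmap_fault(guest_addr, gmap);	/* populate the gmap on demand */
	gmap_disable(gmap);		/* back to the plain host address space */
}
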
/*
* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 1300c3025334..55dfcc8bdc0d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,6 +80,7 @@ struct thread_struct {
mm_segment_t mm_segment;
unsigned long prot_addr; /* address of protection-excep. */
unsigned int trap_no;
+ unsigned long gmap_addr; /* address of last gmap fault. */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
/* pfault_wait is used to block the process on a pfault event */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ad1382f7932e..1a5dbb6f1495 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -94,6 +94,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
+#define TIF_SIE 12 /* guest execution active */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define TIF_31BIT 17 /* 32bit process */
@@ -113,6 +114,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SIE (1<<TIF_SIE)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_FREEZE)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index b7a4f2eb0057..304445382382 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -80,7 +80,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* on all cpus instead of doing a local flush if the mm
* only ran on the local cpu.
*/
- if (MACHINE_HAS_IDTE)
+ if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
__tlb_flush_idte((unsigned long) mm->pgd |
mm->context.asce_bits);
else
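
The added list_empty() check reflects that a gmap shares the process page tables but runs under its own ASCE, so an IDTE flush keyed to the mm's ASCE would leave stale guest TLB entries behind. A hedged sketch of the resulting decision; the __tlb_flush_full() fallback is assumed from the surrounding function, which the hunk does not show:

/* Sketch of the flush decision after this change (same header context). */
static inline void example_tlb_flush_mm(struct mm_struct *mm)
{
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		/* no guest address space attached: one IDTE by ASCE suffices */
		__tlb_flush_idte((unsigned long) mm->pgd | mm->context.asce_bits);
	else
		/* a gmap may hold entries under a different ASCE: flush everything */
		__tlb_flush_full(mm);
}
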
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index edfbd17d7082..05d8f38734ec 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -151,7 +151,7 @@ int main(void)
DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
- DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
+ DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
#endif /* CONFIG_32BIT */
return 0;
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 15e46ca94335..209938c1dfc8 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -6,13 +6,13 @@
* Michael Holzheu <holzheu@de.ibm.com>
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#ifdef CONFIG_64BIT
- .globl s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
basr %r13,0
0: lg %r15,__LC_PANIC_STACK # load panic stack
aghi %r15,-STACK_FRAME_OVERHEAD
@@ -26,13 +26,13 @@ s390_base_mcck_handler:
lpswe __LC_MCK_OLD_PSW
.section .bss
+ .align 8
.globl s390_base_mcck_handler_fn
s390_base_mcck_handler_fn:
.quad 0
.previous
- .globl s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
stmg %r0,%r15,__LC_SAVE_AREA
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
@@ -46,13 +46,13 @@ s390_base_ext_handler:
lpswe __LC_EXT_OLD_PSW
.section .bss
+ .align 8
.globl s390_base_ext_handler_fn
s390_base_ext_handler_fn:
.quad 0
.previous
- .globl s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
stmg %r0,%r15,__LC_SAVE_AREA
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
@@ -70,6 +70,7 @@ disabled_wait_psw:
.quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
.section .bss
+ .align 8
.globl s390_base_pgm_handler_fn
s390_base_pgm_handler_fn:
.quad 0
@@ -77,8 +78,7 @@ s390_base_pgm_handler_fn:
#else /* CONFIG_64BIT */
- .globl s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
basr %r13,0
0: l %r15,__LC_PANIC_STACK # load panic stack
ahi %r15,-STACK_FRAME_OVERHEAD
@@ -93,13 +93,13 @@ s390_base_mcck_handler:
2: .long s390_base_mcck_handler_fn
.section .bss
+ .align 4
.globl s390_base_mcck_handler_fn
s390_base_mcck_handler_fn:
.long 0
.previous
- .globl s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
stm %r0,%r15,__LC_SAVE_AREA
basr %r13,0
0: ahi %r15,-STACK_FRAME_OVERHEAD
@@ -115,13 +115,13 @@ s390_base_ext_handler:
2: .long s390_base_ext_handler_fn
.section .bss
+ .align 4
.globl s390_base_ext_handler_fn
s390_base_ext_handler_fn:
.long 0
.previous
- .globl s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
stm %r0,%r15,__LC_SAVE_AREA
basr %r13,0
0: ahi %r15,-STACK_FRAME_OVERHEAD
@@ -142,6 +142,7 @@ disabled_wait_psw:
.long 0x000a0000,0x00000000 + s390_base_pgm_handler
.section .bss
+ .align 4
.globl s390_base_pgm_handler_fn
s390_base_pgm_handler_fn:
.long 0
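
The mechanical change running through base.S above (and compat_wrapper.S below) replaces each open-coded .globl/label pair with the generic ENTRY() macro from linux/linkage.h. Approximately, that macro expands as shown below; ALIGN is the architecture's default code alignment, which is why the conversion also tidies entry-point alignment:

/* Simplified expansion of the assembler ENTRY() macro (see linux/linkage.h). */
#define ENTRY(name)	\
	.globl name;	\
	ALIGN;		\
	name:
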
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 1f5eb789c3a7..08ab9aa6a0d5 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -7,86 +7,74 @@
* Thomas Spatzier (tspat@de.ibm.com)
*/
- .globl sys32_exit_wrapper
-sys32_exit_wrapper:
+#include <linux/linkage.h>
+
+ENTRY(sys32_exit_wrapper)
lgfr %r2,%r2 # int
jg sys_exit # branch to sys_exit
- .globl sys32_read_wrapper
-sys32_read_wrapper:
+ENTRY(sys32_read_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys32_read # branch to sys_read
- .globl sys32_write_wrapper
-sys32_write_wrapper:
+ENTRY(sys32_write_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # size_t
jg sys32_write # branch to system call
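
Every wrapper in this file follows the same pattern: widen each 31-bit user-space argument into the full 64-bit register expected by the 64-bit system call, then branch. The three instructions used throughout correspond to these conversions; the snippet below is a standalone C illustration of their semantics, while the wrappers themselves stay in assembler:

#include <stdint.h>

static inline int64_t  widen_int(int32_t x)    { return (int64_t)x; }        /* lgfr: sign-extend ints */
static inline uint64_t widen_uint(uint32_t x)  { return (uint64_t)x; }       /* llgfr: zero-extend unsigned */
static inline uint64_t widen_ptr31(uint32_t x) { return x & 0x7fffffffUL; }  /* llgtr: 31-bit user pointer */
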
- .globl sys32_open_wrapper
-sys32_open_wrapper:
+ENTRY(sys32_open_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
jg sys_open # branch to system call
- .globl sys32_close_wrapper
-sys32_close_wrapper:
+ENTRY(sys32_close_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_close # branch to system call
- .globl sys32_creat_wrapper
-sys32_creat_wrapper:
+ENTRY(sys32_creat_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_creat # branch to system call
- .globl sys32_link_wrapper
-sys32_link_wrapper:
+ENTRY(sys32_link_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_link # branch to system call
- .globl sys32_unlink_wrapper
-sys32_unlink_wrapper:
+ENTRY(sys32_unlink_wrapper)
llgtr %r2,%r2 # const char *
jg sys_unlink # branch to system call
- .globl sys32_chdir_wrapper
-sys32_chdir_wrapper:
+ENTRY(sys32_chdir_wrapper)
llgtr %r2,%r2 # const char *
jg sys_chdir # branch to system call
- .globl sys32_time_wrapper
-sys32_time_wrapper:
+ENTRY(sys32_time_wrapper)
llgtr %r2,%r2 # int *
jg compat_sys_time # branch to system call
- .globl sys32_mknod_wrapper
-sys32_mknod_wrapper:
+ENTRY(sys32_mknod_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
llgfr %r4,%r4 # dev
jg sys_mknod # branch to system call
- .globl sys32_chmod_wrapper
-sys32_chmod_wrapper:
+ENTRY(sys32_chmod_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # mode_t
jg sys_chmod # branch to system call
- .globl sys32_lchown16_wrapper
-sys32_lchown16_wrapper:
+ENTRY(sys32_lchown16_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
llgfr %r4,%r4 # __kernel_old_uid_emu31_t
jg sys32_lchown16 # branch to system call
- .globl sys32_lseek_wrapper
-sys32_lseek_wrapper:
+ENTRY(sys32_lseek_wrapper)
llgfr %r2,%r2 # unsigned int
lgfr %r3,%r3 # off_t
llgfr %r4,%r4 # unsigned int
@@ -94,8 +82,7 @@ sys32_lseek_wrapper:
#sys32_getpid_wrapper # void
- .globl sys32_mount_wrapper
-sys32_mount_wrapper:
+ENTRY(sys32_mount_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # char *
@@ -103,102 +90,85 @@ sys32_mount_wrapper:
llgtr %r6,%r6 # void *
jg compat_sys_mount # branch to system call
- .globl sys32_oldumount_wrapper
-sys32_oldumount_wrapper:
+ENTRY(sys32_oldumount_wrapper)
llgtr %r2,%r2 # char *
jg sys_oldumount # branch to system call
- .globl sys32_setuid16_wrapper
-sys32_setuid16_wrapper:
+ENTRY(sys32_setuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
jg sys32_setuid16 # branch to system call
#sys32_getuid16_wrapper # void
- .globl sys32_ptrace_wrapper
-sys32_ptrace_wrapper:
+ENTRY(sys32_ptrace_wrapper)
lgfr %r2,%r2 # long
lgfr %r3,%r3 # long
llgtr %r4,%r4 # long
llgfr %r5,%r5 # long
jg compat_sys_ptrace # branch to system call
- .globl sys32_alarm_wrapper
-sys32_alarm_wrapper:
+ENTRY(sys32_alarm_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_alarm # branch to system call
- .globl compat_sys_utime_wrapper
-compat_sys_utime_wrapper:
+ENTRY(compat_sys_utime_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct compat_utimbuf *
jg compat_sys_utime # branch to system call
- .globl sys32_access_wrapper
-sys32_access_wrapper:
+ENTRY(sys32_access_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_access # branch to system call
- .globl sys32_nice_wrapper
-sys32_nice_wrapper:
+ENTRY(sys32_nice_wrapper)
lgfr %r2,%r2 # int
jg sys_nice # branch to system call
#sys32_sync_wrapper # void
- .globl sys32_kill_wrapper
-sys32_kill_wrapper:
+ENTRY(sys32_kill_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_kill # branch to system call
- .globl sys32_rename_wrapper
-sys32_rename_wrapper:
+ENTRY(sys32_rename_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_rename # branch to system call
- .globl sys32_mkdir_wrapper
-sys32_mkdir_wrapper:
+ENTRY(sys32_mkdir_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_mkdir # branch to system call
- .globl sys32_rmdir_wrapper
-sys32_rmdir_wrapper:
+ENTRY(sys32_rmdir_wrapper)
llgtr %r2,%r2 # const char *
jg sys_rmdir # branch to system call
- .globl sys32_dup_wrapper
-sys32_dup_wrapper:
+ENTRY(sys32_dup_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_dup # branch to system call
- .globl sys32_pipe_wrapper
-sys32_pipe_wrapper:
+ENTRY(sys32_pipe_wrapper)
llgtr %r2,%r2 # u32 *
jg sys_pipe # branch to system call
- .globl compat_sys_times_wrapper
-compat_sys_times_wrapper:
+ENTRY(compat_sys_times_wrapper)
llgtr %r2,%r2 # struct compat_tms *
jg compat_sys_times # branch to system call
- .globl sys32_brk_wrapper
-sys32_brk_wrapper:
+ENTRY(sys32_brk_wrapper)
llgtr %r2,%r2 # unsigned long
jg sys_brk # branch to system call
- .globl sys32_setgid16_wrapper
-sys32_setgid16_wrapper:
+ENTRY(sys32_setgid16_wrapper)
llgfr %r2,%r2 # __kernel_old_gid_emu31_t
jg sys32_setgid16 # branch to system call
#sys32_getgid16_wrapper # void
- .globl sys32_signal_wrapper
-sys32_signal_wrapper:
+ENTRY(sys32_signal_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # __sighandler_t
jg sys_signal
@@ -207,55 +177,46 @@ sys32_signal_wrapper:
#sys32_getegid16_wrapper # void
- .globl sys32_acct_wrapper
-sys32_acct_wrapper:
+ENTRY(sys32_acct_wrapper)
llgtr %r2,%r2 # char *
jg sys_acct # branch to system call
- .globl sys32_umount_wrapper
-sys32_umount_wrapper:
+ENTRY(sys32_umount_wrapper)
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_umount # branch to system call
- .globl compat_sys_ioctl_wrapper
-compat_sys_ioctl_wrapper:
+ENTRY(compat_sys_ioctl_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned int
jg compat_sys_ioctl # branch to system call
- .globl compat_sys_fcntl_wrapper
-compat_sys_fcntl_wrapper:
+ENTRY(compat_sys_fcntl_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned long
jg compat_sys_fcntl # branch to system call
- .globl sys32_setpgid_wrapper
-sys32_setpgid_wrapper:
+ENTRY(sys32_setpgid_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # pid_t
jg sys_setpgid # branch to system call
- .globl sys32_umask_wrapper
-sys32_umask_wrapper:
+ENTRY(sys32_umask_wrapper)
lgfr %r2,%r2 # int
jg sys_umask # branch to system call
- .globl sys32_chroot_wrapper
-sys32_chroot_wrapper:
+ENTRY(sys32_chroot_wrapper)
llgtr %r2,%r2 # char *
jg sys_chroot # branch to system call
- .globl sys32_ustat_wrapper
-sys32_ustat_wrapper:
+ENTRY(sys32_ustat_wrapper)
llgfr %r2,%r2 # dev_t
llgtr %r3,%r3 # struct ustat *
jg compat_sys_ustat
- .globl sys32_dup2_wrapper
-sys32_dup2_wrapper:
+ENTRY(sys32_dup2_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
jg sys_dup2 # branch to system call
@@ -266,262 +227,220 @@ sys32_dup2_wrapper:
#sys32_setsid_wrapper # void
- .globl sys32_sigaction_wrapper
-sys32_sigaction_wrapper:
+ENTRY(sys32_sigaction_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct old_sigaction *
llgtr %r4,%r4 # struct old_sigaction32 *
jg sys32_sigaction # branch to system call
- .globl sys32_setreuid16_wrapper
-sys32_setreuid16_wrapper:
+ENTRY(sys32_setreuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
jg sys32_setreuid16 # branch to system call
- .globl sys32_setregid16_wrapper
-sys32_setregid16_wrapper:
+ENTRY(sys32_setregid16_wrapper)
llgfr %r2,%r2 # __kernel_old_gid_emu31_t
llgfr %r3,%r3 # __kernel_old_gid_emu31_t
jg sys32_setregid16 # branch to system call
- .globl sys_sigsuspend_wrapper
-sys_sigsuspend_wrapper:
+ENTRY(sys_sigsuspend_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgfr %r4,%r4 # old_sigset_t
jg sys_sigsuspend
- .globl compat_sys_sigpending_wrapper
-compat_sys_sigpending_wrapper:
+ENTRY(compat_sys_sigpending_wrapper)
llgtr %r2,%r2 # compat_old_sigset_t *
jg compat_sys_sigpending # branch to system call
- .globl sys32_sethostname_wrapper
-sys32_sethostname_wrapper:
+ENTRY(sys32_sethostname_wrapper)
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_sethostname # branch to system call
- .globl compat_sys_setrlimit_wrapper
-compat_sys_setrlimit_wrapper:
+ENTRY(compat_sys_setrlimit_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_setrlimit # branch to system call
- .globl compat_sys_old_getrlimit_wrapper
-compat_sys_old_getrlimit_wrapper:
+ENTRY(compat_sys_old_getrlimit_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_old_getrlimit # branch to system call
- .globl compat_sys_getrlimit_wrapper
-compat_sys_getrlimit_wrapper:
+ENTRY(compat_sys_getrlimit_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct rlimit_emu31 *
jg compat_sys_getrlimit # branch to system call
- .globl sys32_mmap2_wrapper
-sys32_mmap2_wrapper:
+ENTRY(sys32_mmap2_wrapper)
llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
jg sys32_mmap2 # branch to system call
- .globl compat_sys_getrusage_wrapper
-compat_sys_getrusage_wrapper:
+ENTRY(compat_sys_getrusage_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct rusage_emu31 *
jg compat_sys_getrusage # branch to system call
- .globl compat_sys_gettimeofday_wrapper
-compat_sys_gettimeofday_wrapper:
+ENTRY(compat_sys_gettimeofday_wrapper)
llgtr %r2,%r2 # struct timeval_emu31 *
llgtr %r3,%r3 # struct timezone *
jg compat_sys_gettimeofday # branch to system call
- .globl compat_sys_settimeofday_wrapper
-compat_sys_settimeofday_wrapper:
+ENTRY(compat_sys_settimeofday_wrapper)
llgtr %r2,%r2 # struct timeval_emu31 *
llgtr %r3,%r3 # struct timezone *
jg compat_sys_settimeofday # branch to system call
- .globl sys32_getgroups16_wrapper
-sys32_getgroups16_wrapper:
+ENTRY(sys32_getgroups16_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
jg sys32_getgroups16 # branch to system call
- .globl sys32_setgroups16_wrapper
-sys32_setgroups16_wrapper:
+ENTRY(sys32_setgroups16_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
jg sys32_setgroups16 # branch to system call
- .globl sys32_symlink_wrapper
-sys32_symlink_wrapper:
+ENTRY(sys32_symlink_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_symlink # branch to system call
- .globl sys32_readlink_wrapper
-sys32_readlink_wrapper:
+ENTRY(sys32_readlink_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # char *
lgfr %r4,%r4 # int
jg sys_readlink # branch to system call
- .globl sys32_uselib_wrapper
-sys32_uselib_wrapper:
+ENTRY(sys32_uselib_wrapper)
llgtr %r2,%r2 # const char *
jg sys_uselib # branch to system call
- .globl sys32_swapon_wrapper
-sys32_swapon_wrapper:
+ENTRY(sys32_swapon_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
jg sys_swapon # branch to system call
- .globl sys32_reboot_wrapper
-sys32_reboot_wrapper:
+ENTRY(sys32_reboot_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgfr %r4,%r4 # unsigned int
llgtr %r5,%r5 # void *
jg sys_reboot # branch to system call
- .globl old32_readdir_wrapper
-old32_readdir_wrapper:
+ENTRY(old32_readdir_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg compat_sys_old_readdir # branch to system call
- .globl old32_mmap_wrapper
-old32_mmap_wrapper:
+ENTRY(old32_mmap_wrapper)
llgtr %r2,%r2 # struct mmap_arg_struct_emu31 *
jg old32_mmap # branch to system call
- .globl sys32_munmap_wrapper
-sys32_munmap_wrapper:
+ENTRY(sys32_munmap_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
jg sys_munmap # branch to system call
- .globl sys32_truncate_wrapper
-sys32_truncate_wrapper:
+ENTRY(sys32_truncate_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # long
jg sys_truncate # branch to system call
- .globl sys32_ftruncate_wrapper
-sys32_ftruncate_wrapper:
+ENTRY(sys32_ftruncate_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
jg sys_ftruncate # branch to system call
- .globl sys32_fchmod_wrapper
-sys32_fchmod_wrapper:
+ENTRY(sys32_fchmod_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # mode_t
jg sys_fchmod # branch to system call
- .globl sys32_fchown16_wrapper
-sys32_fchown16_wrapper:
+ENTRY(sys32_fchown16_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # compat_uid_t
llgfr %r4,%r4 # compat_uid_t
jg sys32_fchown16 # branch to system call
- .globl sys32_getpriority_wrapper
-sys32_getpriority_wrapper:
+ENTRY(sys32_getpriority_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_getpriority # branch to system call
- .globl sys32_setpriority_wrapper
-sys32_setpriority_wrapper:
+ENTRY(sys32_setpriority_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
jg sys_setpriority # branch to system call
- .globl compat_sys_statfs_wrapper
-compat_sys_statfs_wrapper:
+ENTRY(compat_sys_statfs_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct compat_statfs *
jg compat_sys_statfs # branch to system call
- .globl compat_sys_fstatfs_wrapper
-compat_sys_fstatfs_wrapper:
+ENTRY(compat_sys_fstatfs_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct compat_statfs *
jg compat_sys_fstatfs # branch to system call
- .globl compat_sys_socketcall_wrapper
-compat_sys_socketcall_wrapper:
+ENTRY(compat_sys_socketcall_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # u32 *
jg compat_sys_socketcall # branch to system call
- .globl sys32_syslog_wrapper
-sys32_syslog_wrapper:
+ENTRY(sys32_syslog_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
lgfr %r4,%r4 # int
jg sys_syslog # branch to system call
- .globl compat_sys_setitimer_wrapper
-compat_sys_setitimer_wrapper:
+ENTRY(compat_sys_setitimer_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct itimerval_emu31 *
llgtr %r4,%r4 # struct itimerval_emu31 *
jg compat_sys_setitimer # branch to system call
- .globl compat_sys_getitimer_wrapper
-compat_sys_getitimer_wrapper:
+ENTRY(compat_sys_getitimer_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct itimerval_emu31 *
jg compat_sys_getitimer # branch to system call
- .globl compat_sys_newstat_wrapper
-compat_sys_newstat_wrapper:
+ENTRY(compat_sys_newstat_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newstat # branch to system call
- .globl compat_sys_newlstat_wrapper
-compat_sys_newlstat_wrapper:
+ENTRY(compat_sys_newlstat_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newlstat # branch to system call
- .globl compat_sys_newfstat_wrapper
-compat_sys_newfstat_wrapper:
+ENTRY(compat_sys_newfstat_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # struct stat_emu31 *
jg compat_sys_newfstat # branch to system call
#sys32_vhangup_wrapper # void
- .globl compat_sys_wait4_wrapper
-compat_sys_wait4_wrapper:
+ENTRY(compat_sys_wait4_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # unsigned int *
lgfr %r4,%r4 # int
llgtr %r5,%r5 # struct rusage *
jg compat_sys_wait4 # branch to system call
- .globl sys32_swapoff_wrapper
-sys32_swapoff_wrapper:
+ENTRY(sys32_swapoff_wrapper)
llgtr %r2,%r2 # const char *
jg sys_swapoff # branch to system call
- .globl compat_sys_sysinfo_wrapper
-compat_sys_sysinfo_wrapper:
+ENTRY(compat_sys_sysinfo_wrapper)
llgtr %r2,%r2 # struct sysinfo_emu31 *
jg compat_sys_sysinfo # branch to system call
- .globl sys32_ipc_wrapper
-sys32_ipc_wrapper:
+ENTRY(sys32_ipc_wrapper)
llgfr %r2,%r2 # uint
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
@@ -529,8 +448,7 @@ sys32_ipc_wrapper:
llgfr %r6,%r6 # u32
jg sys32_ipc # branch to system call
- .globl sys32_fsync_wrapper
-sys32_fsync_wrapper:
+ENTRY(sys32_fsync_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_fsync # branch to system call
@@ -538,97 +456,81 @@ sys32_fsync_wrapper:
#sys32_clone_wrapper # done in clone_glue
- .globl sys32_setdomainname_wrapper
-sys32_setdomainname_wrapper:
+ENTRY(sys32_setdomainname_wrapper)
llgtr %r2,%r2 # char *
lgfr %r3,%r3 # int
jg sys_setdomainname # branch to system call
- .globl sys32_newuname_wrapper
-sys32_newuname_wrapper:
+ENTRY(sys32_newuname_wrapper)
llgtr %r2,%r2 # struct new_utsname *
jg sys_newuname # branch to system call
- .globl compat_sys_adjtimex_wrapper
-compat_sys_adjtimex_wrapper:
+ENTRY(compat_sys_adjtimex_wrapper)
llgtr %r2,%r2 # struct compat_timex *
jg compat_sys_adjtimex # branch to system call
- .globl sys32_mprotect_wrapper
-sys32_mprotect_wrapper:
+ENTRY(sys32_mprotect_wrapper)
llgtr %r2,%r2 # unsigned long (actually a pointer)
llgfr %r3,%r3 # size_t
llgfr %r4,%r4 # unsigned long
jg sys_mprotect # branch to system call
- .globl compat_sys_sigprocmask_wrapper
-compat_sys_sigprocmask_wrapper:
+ENTRY(compat_sys_sigprocmask_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_old_sigset_t *
llgtr %r4,%r4 # compat_old_sigset_t *
jg compat_sys_sigprocmask # branch to system call
- .globl sys_init_module_wrapper
-sys_init_module_wrapper:
+ENTRY(sys_init_module_wrapper)
llgtr %r2,%r2 # void *
llgfr %r3,%r3 # unsigned long
llgtr %r4,%r4 # char *
jg sys_init_module # branch to system call
- .globl sys_delete_module_wrapper
-sys_delete_module_wrapper:
+ENTRY(sys_delete_module_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # unsigned int
jg sys_delete_module # branch to system call
- .globl sys32_quotactl_wrapper
-sys32_quotactl_wrapper:
+ENTRY(sys32_quotactl_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # qid_t
llgtr %r5,%r5 # caddr_t
jg sys_quotactl # branch to system call
- .globl sys32_getpgid_wrapper
-sys32_getpgid_wrapper:
+ENTRY(sys32_getpgid_wrapper)
lgfr %r2,%r2 # pid_t
jg sys_getpgid # branch to system call
- .globl sys32_fchdir_wrapper
-sys32_fchdir_wrapper:
+ENTRY(sys32_fchdir_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_fchdir # branch to system call
- .globl sys32_bdflush_wrapper
-sys32_bdflush_wrapper:
+ENTRY(sys32_bdflush_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # long
jg sys_bdflush # branch to system call
- .globl sys32_sysfs_wrapper
-sys32_sysfs_wrapper:
+ENTRY(sys32_sysfs_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys_sysfs # branch to system call
- .globl sys32_personality_wrapper
-sys32_personality_wrapper:
+ENTRY(sys32_personality_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_s390_personality # branch to system call
- .globl sys32_setfsuid16_wrapper
-sys32_setfsuid16_wrapper:
+ENTRY(sys32_setfsuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
jg sys32_setfsuid16 # branch to system call
- .globl sys32_setfsgid16_wrapper
-sys32_setfsgid16_wrapper:
+ENTRY(sys32_setfsgid16_wrapper)
llgfr %r2,%r2 # __kernel_old_gid_emu31_t
jg sys32_setfsgid16 # branch to system call
- .globl sys32_llseek_wrapper
-sys32_llseek_wrapper:
+ENTRY(sys32_llseek_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
@@ -636,15 +538,13 @@ sys32_llseek_wrapper:
llgfr %r6,%r6 # unsigned int
jg sys_llseek # branch to system call
- .globl sys32_getdents_wrapper
-sys32_getdents_wrapper:
+ENTRY(sys32_getdents_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg compat_sys_getdents # branch to system call
- .globl compat_sys_select_wrapper
-compat_sys_select_wrapper:
+ENTRY(compat_sys_select_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_fd_set *
llgtr %r4,%r4 # compat_fd_set *
@@ -652,112 +552,94 @@ compat_sys_select_wrapper:
llgtr %r6,%r6 # struct compat_timeval *
jg compat_sys_select # branch to system call
- .globl sys32_flock_wrapper
-sys32_flock_wrapper:
+ENTRY(sys32_flock_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
jg sys_flock # branch to system call
- .globl sys32_msync_wrapper
-sys32_msync_wrapper:
+ENTRY(sys32_msync_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
lgfr %r4,%r4 # int
jg sys_msync # branch to system call
- .globl compat_sys_readv_wrapper
-compat_sys_readv_wrapper:
+ENTRY(compat_sys_readv_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct compat_iovec *
llgfr %r4,%r4 # unsigned long
jg compat_sys_readv # branch to system call
- .globl compat_sys_writev_wrapper
-compat_sys_writev_wrapper:
+ENTRY(compat_sys_writev_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct compat_iovec *
llgfr %r4,%r4 # unsigned long
jg compat_sys_writev # branch to system call
- .globl sys32_getsid_wrapper
-sys32_getsid_wrapper:
+ENTRY(sys32_getsid_wrapper)
lgfr %r2,%r2 # pid_t
jg sys_getsid # branch to system call
- .globl sys32_fdatasync_wrapper
-sys32_fdatasync_wrapper:
+ENTRY(sys32_fdatasync_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_fdatasync # branch to system call
- .globl sys32_mlock_wrapper
-sys32_mlock_wrapper:
+ENTRY(sys32_mlock_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
jg sys_mlock # branch to system call
- .globl sys32_munlock_wrapper
-sys32_munlock_wrapper:
+ENTRY(sys32_munlock_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
jg sys_munlock # branch to system call
- .globl sys32_mlockall_wrapper
-sys32_mlockall_wrapper:
+ENTRY(sys32_mlockall_wrapper)
lgfr %r2,%r2 # int
jg sys_mlockall # branch to system call
#sys32_munlockall_wrapper # void
- .globl sys32_sched_setparam_wrapper
-sys32_sched_setparam_wrapper:
+ENTRY(sys32_sched_setparam_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct sched_param *
jg sys_sched_setparam # branch to system call
- .globl sys32_sched_getparam_wrapper
-sys32_sched_getparam_wrapper:
+ENTRY(sys32_sched_getparam_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct sched_param *
jg sys_sched_getparam # branch to system call
- .globl sys32_sched_setscheduler_wrapper
-sys32_sched_setscheduler_wrapper:
+ENTRY(sys32_sched_setscheduler_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct sched_param *
jg sys_sched_setscheduler # branch to system call
- .globl sys32_sched_getscheduler_wrapper
-sys32_sched_getscheduler_wrapper:
+ENTRY(sys32_sched_getscheduler_wrapper)
lgfr %r2,%r2 # pid_t
jg sys_sched_getscheduler # branch to system call
#sys32_sched_yield_wrapper # void
- .globl sys32_sched_get_priority_max_wrapper
-sys32_sched_get_priority_max_wrapper:
+ENTRY(sys32_sched_get_priority_max_wrapper)
lgfr %r2,%r2 # int
jg sys_sched_get_priority_max # branch to system call
- .globl sys32_sched_get_priority_min_wrapper
-sys32_sched_get_priority_min_wrapper:
+ENTRY(sys32_sched_get_priority_min_wrapper)
lgfr %r2,%r2 # int
jg sys_sched_get_priority_min # branch to system call
- .globl sys32_sched_rr_get_interval_wrapper
-sys32_sched_rr_get_interval_wrapper:
+ENTRY(sys32_sched_rr_get_interval_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # struct compat_timespec *
jg sys32_sched_rr_get_interval # branch to system call
- .globl compat_sys_nanosleep_wrapper
-compat_sys_nanosleep_wrapper:
+ENTRY(compat_sys_nanosleep_wrapper)
llgtr %r2,%r2 # struct compat_timespec *
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_nanosleep # branch to system call
- .globl sys32_mremap_wrapper
-sys32_mremap_wrapper:
+ENTRY(sys32_mremap_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
@@ -765,50 +647,43 @@ sys32_mremap_wrapper:
llgfr %r6,%r6 # unsigned long
jg sys_mremap # branch to system call
- .globl sys32_setresuid16_wrapper
-sys32_setresuid16_wrapper:
+ENTRY(sys32_setresuid16_wrapper)
llgfr %r2,%r2 # __kernel_old_uid_emu31_t
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
llgfr %r4,%r4 # __kernel_old_uid_emu31_t
jg sys32_setresuid16 # branch to system call
- .globl sys32_getresuid16_wrapper
-sys32_getresuid16_wrapper:
+ENTRY(sys32_getresuid16_wrapper)
llgtr %r2,%r2 # __kernel_old_uid_emu31_t *
llgtr %r3,%r3 # __kernel_old_uid_emu31_t *
llgtr %r4,%r4 # __kernel_old_uid_emu31_t *
jg sys32_getresuid16 # branch to system call
- .globl sys32_poll_wrapper
-sys32_poll_wrapper:
+ENTRY(sys32_poll_wrapper)
llgtr %r2,%r2 # struct pollfd *
llgfr %r3,%r3 # unsigned int
lgfr %r4,%r4 # long
jg sys_poll # branch to system call
- .globl compat_sys_nfsservctl_wrapper
-compat_sys_nfsservctl_wrapper:
+ENTRY(compat_sys_nfsservctl_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct compat_nfsctl_arg*
llgtr %r4,%r4 # union compat_nfsctl_res*
jg compat_sys_nfsservctl # branch to system call
- .globl sys32_setresgid16_wrapper
-sys32_setresgid16_wrapper:
+ENTRY(sys32_setresgid16_wrapper)
llgfr %r2,%r2 # __kernel_old_gid_emu31_t
llgfr %r3,%r3 # __kernel_old_gid_emu31_t
llgfr %r4,%r4 # __kernel_old_gid_emu31_t
jg sys32_setresgid16 # branch to system call
- .globl sys32_getresgid16_wrapper
-sys32_getresgid16_wrapper:
+ENTRY(sys32_getresgid16_wrapper)
llgtr %r2,%r2 # __kernel_old_gid_emu31_t *
llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
llgtr %r4,%r4 # __kernel_old_gid_emu31_t *
jg sys32_getresgid16 # branch to system call
- .globl sys32_prctl_wrapper
-sys32_prctl_wrapper:
+ENTRY(sys32_prctl_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
@@ -818,51 +693,44 @@ sys32_prctl_wrapper:
#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue
- .globl sys32_rt_sigaction_wrapper
-sys32_rt_sigaction_wrapper:
+ENTRY(sys32_rt_sigaction_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const struct sigaction_emu31 *
llgtr %r4,%r4 # const struct sigaction_emu31 *
llgfr %r5,%r5 # size_t
jg sys32_rt_sigaction # branch to system call
- .globl sys32_rt_sigprocmask_wrapper
-sys32_rt_sigprocmask_wrapper:
+ENTRY(sys32_rt_sigprocmask_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # old_sigset_emu31 *
llgtr %r4,%r4 # old_sigset_emu31 *
llgfr %r5,%r5 # size_t
jg sys32_rt_sigprocmask # branch to system call
- .globl sys32_rt_sigpending_wrapper
-sys32_rt_sigpending_wrapper:
+ENTRY(sys32_rt_sigpending_wrapper)
llgtr %r2,%r2 # sigset_emu31 *
llgfr %r3,%r3 # size_t
jg sys32_rt_sigpending # branch to system call
- .globl compat_sys_rt_sigtimedwait_wrapper
-compat_sys_rt_sigtimedwait_wrapper:
+ENTRY(compat_sys_rt_sigtimedwait_wrapper)
llgtr %r2,%r2 # const sigset_emu31_t *
llgtr %r3,%r3 # siginfo_emu31_t *
llgtr %r4,%r4 # const struct compat_timespec *
llgfr %r5,%r5 # size_t
jg compat_sys_rt_sigtimedwait # branch to system call
- .globl sys32_rt_sigqueueinfo_wrapper
-sys32_rt_sigqueueinfo_wrapper:
+ENTRY(sys32_rt_sigqueueinfo_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgtr %r4,%r4 # siginfo_emu31_t *
jg sys32_rt_sigqueueinfo # branch to system call
- .globl compat_sys_rt_sigsuspend_wrapper
-compat_sys_rt_sigsuspend_wrapper:
+ENTRY(compat_sys_rt_sigsuspend_wrapper)
llgtr %r2,%r2 # compat_sigset_t *
llgfr %r3,%r3 # compat_size_t
jg compat_sys_rt_sigsuspend
- .globl sys32_pread64_wrapper
-sys32_pread64_wrapper:
+ENTRY(sys32_pread64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
@@ -870,8 +738,7 @@ sys32_pread64_wrapper:
llgfr %r6,%r6 # u32
jg sys32_pread64 # branch to system call
- .globl sys32_pwrite64_wrapper
-sys32_pwrite64_wrapper:
+ENTRY(sys32_pwrite64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # size_t
@@ -879,39 +746,33 @@ sys32_pwrite64_wrapper:
llgfr %r6,%r6 # u32
jg sys32_pwrite64 # branch to system call
- .globl sys32_chown16_wrapper
-sys32_chown16_wrapper:
+ENTRY(sys32_chown16_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # __kernel_old_uid_emu31_t
llgfr %r4,%r4 # __kernel_old_gid_emu31_t
jg sys32_chown16 # branch to system call
- .globl sys32_getcwd_wrapper
-sys32_getcwd_wrapper:
+ENTRY(sys32_getcwd_wrapper)
llgtr %r2,%r2 # char *
llgfr %r3,%r3 # unsigned long
jg sys_getcwd # branch to system call
- .globl sys32_capget_wrapper
-sys32_capget_wrapper:
+ENTRY(sys32_capget_wrapper)
llgtr %r2,%r2 # cap_user_header_t
llgtr %r3,%r3 # cap_user_data_t
jg sys_capget # branch to system call
- .globl sys32_capset_wrapper
-sys32_capset_wrapper:
+ENTRY(sys32_capset_wrapper)
llgtr %r2,%r2 # cap_user_header_t
llgtr %r3,%r3 # const cap_user_data_t
jg sys_capset # branch to system call
- .globl sys32_sigaltstack_wrapper
-sys32_sigaltstack_wrapper:
+ENTRY(sys32_sigaltstack_wrapper)
llgtr %r2,%r2 # const stack_emu31_t *
llgtr %r3,%r3 # stack_emu31_t *
jg sys32_sigaltstack
- .globl sys32_sendfile_wrapper
-sys32_sendfile_wrapper:
+ENTRY(sys32_sendfile_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgtr %r4,%r4 # __kernel_off_emu31_t *
@@ -920,22 +781,19 @@ sys32_sendfile_wrapper:
#sys32_vfork_wrapper # done in vfork_glue
- .globl sys32_truncate64_wrapper
-sys32_truncate64_wrapper:
+ENTRY(sys32_truncate64_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys32_truncate64 # branch to system call
- .globl sys32_ftruncate64_wrapper
-sys32_ftruncate64_wrapper:
+ENTRY(sys32_ftruncate64_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
jg sys32_ftruncate64 # branch to system call
- .globl sys32_lchown_wrapper
-sys32_lchown_wrapper:
+ENTRY(sys32_lchown_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
@@ -946,156 +804,131 @@ sys32_lchown_wrapper:
#sys32_geteuid_wrapper # void
#sys32_getegid_wrapper # void
- .globl sys32_setreuid_wrapper
-sys32_setreuid_wrapper:
+ENTRY(sys32_setreuid_wrapper)
llgfr %r2,%r2 # uid_t
llgfr %r3,%r3 # uid_t
jg sys_setreuid # branch to system call
- .globl sys32_setregid_wrapper
-sys32_setregid_wrapper:
+ENTRY(sys32_setregid_wrapper)
llgfr %r2,%r2 # gid_t
llgfr %r3,%r3 # gid_t
jg sys_setregid # branch to system call
- .globl sys32_getgroups_wrapper
-sys32_getgroups_wrapper:
+ENTRY(sys32_getgroups_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # gid_t *
jg sys_getgroups # branch to system call
- .globl sys32_setgroups_wrapper
-sys32_setgroups_wrapper:
+ENTRY(sys32_setgroups_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # gid_t *
jg sys_setgroups # branch to system call
- .globl sys32_fchown_wrapper
-sys32_fchown_wrapper:
+ENTRY(sys32_fchown_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
jg sys_fchown # branch to system call
- .globl sys32_setresuid_wrapper
-sys32_setresuid_wrapper:
+ENTRY(sys32_setresuid_wrapper)
llgfr %r2,%r2 # uid_t
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # uid_t
jg sys_setresuid # branch to system call
- .globl sys32_getresuid_wrapper
-sys32_getresuid_wrapper:
+ENTRY(sys32_getresuid_wrapper)
llgtr %r2,%r2 # uid_t *
llgtr %r3,%r3 # uid_t *
llgtr %r4,%r4 # uid_t *
jg sys_getresuid # branch to system call
- .globl sys32_setresgid_wrapper
-sys32_setresgid_wrapper:
+ENTRY(sys32_setresgid_wrapper)
llgfr %r2,%r2 # gid_t
llgfr %r3,%r3 # gid_t
llgfr %r4,%r4 # gid_t
jg sys_setresgid # branch to system call
- .globl sys32_getresgid_wrapper
-sys32_getresgid_wrapper:
+ENTRY(sys32_getresgid_wrapper)
llgtr %r2,%r2 # gid_t *
llgtr %r3,%r3 # gid_t *
llgtr %r4,%r4 # gid_t *
jg sys_getresgid # branch to system call
- .globl sys32_chown_wrapper
-sys32_chown_wrapper:
+ENTRY(sys32_chown_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # uid_t
llgfr %r4,%r4 # gid_t
jg sys_chown # branch to system call
- .globl sys32_setuid_wrapper
-sys32_setuid_wrapper:
+ENTRY(sys32_setuid_wrapper)
llgfr %r2,%r2 # uid_t
jg sys_setuid # branch to system call
- .globl sys32_setgid_wrapper
-sys32_setgid_wrapper:
+ENTRY(sys32_setgid_wrapper)
llgfr %r2,%r2 # gid_t
jg sys_setgid # branch to system call
- .globl sys32_setfsuid_wrapper
-sys32_setfsuid_wrapper:
+ENTRY(sys32_setfsuid_wrapper)
llgfr %r2,%r2 # uid_t
jg sys_setfsuid # branch to system call
- .globl sys32_setfsgid_wrapper
-sys32_setfsgid_wrapper:
+ENTRY(sys32_setfsgid_wrapper)
llgfr %r2,%r2 # gid_t
jg sys_setfsgid # branch to system call
- .globl sys32_pivot_root_wrapper
-sys32_pivot_root_wrapper:
+ENTRY(sys32_pivot_root_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
jg sys_pivot_root # branch to system call
- .globl sys32_mincore_wrapper
-sys32_mincore_wrapper:
+ENTRY(sys32_mincore_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
llgtr %r4,%r4 # unsigned char *
jg sys_mincore # branch to system call
- .globl sys32_madvise_wrapper
-sys32_madvise_wrapper:
+ENTRY(sys32_madvise_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # size_t
lgfr %r4,%r4 # int
jg sys_madvise # branch to system call
- .globl sys32_getdents64_wrapper
-sys32_getdents64_wrapper:
+ENTRY(sys32_getdents64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # void *
llgfr %r4,%r4 # unsigned int
jg sys_getdents64 # branch to system call
- .globl compat_sys_fcntl64_wrapper
-compat_sys_fcntl64_wrapper:
+ENTRY(compat_sys_fcntl64_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned long
jg compat_sys_fcntl64 # branch to system call
- .globl sys32_stat64_wrapper
-sys32_stat64_wrapper:
+ENTRY(sys32_stat64_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat64 *
jg sys32_stat64 # branch to system call
- .globl sys32_lstat64_wrapper
-sys32_lstat64_wrapper:
+ENTRY(sys32_lstat64_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct stat64 *
jg sys32_lstat64 # branch to system call
- .globl sys32_stime_wrapper
-sys32_stime_wrapper:
+ENTRY(sys32_stime_wrapper)
llgtr %r2,%r2 # long *
jg compat_sys_stime # branch to system call
- .globl sys32_sysctl_wrapper
-sys32_sysctl_wrapper:
+ENTRY(sys32_sysctl_wrapper)
llgtr %r2,%r2 # struct compat_sysctl_args *
jg compat_sys_sysctl
- .globl sys32_fstat64_wrapper
-sys32_fstat64_wrapper:
+ENTRY(sys32_fstat64_wrapper)
llgfr %r2,%r2 # unsigned long
llgtr %r3,%r3 # struct stat64 *
jg sys32_fstat64 # branch to system call
- .globl compat_sys_futex_wrapper
-compat_sys_futex_wrapper:
+ENTRY(compat_sys_futex_wrapper)
llgtr %r2,%r2 # u32 *
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
@@ -1105,8 +938,7 @@ compat_sys_futex_wrapper:
stg %r0,160(%r15)
jg compat_sys_futex # branch to system call
- .globl sys32_setxattr_wrapper
-sys32_setxattr_wrapper:
+ENTRY(sys32_setxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
@@ -1114,8 +946,7 @@ sys32_setxattr_wrapper:
lgfr %r6,%r6 # int
jg sys_setxattr
- .globl sys32_lsetxattr_wrapper
-sys32_lsetxattr_wrapper:
+ENTRY(sys32_lsetxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
@@ -1123,8 +954,7 @@ sys32_lsetxattr_wrapper:
lgfr %r6,%r6 # int
jg sys_lsetxattr
- .globl sys32_fsetxattr_wrapper
-sys32_fsetxattr_wrapper:
+ENTRY(sys32_fsetxattr_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
@@ -1132,124 +962,106 @@ sys32_fsetxattr_wrapper:
lgfr %r6,%r6 # int
jg sys_fsetxattr
- .globl sys32_getxattr_wrapper
-sys32_getxattr_wrapper:
+ENTRY(sys32_getxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
llgfr %r5,%r5 # size_t
jg sys_getxattr
- .globl sys32_lgetxattr_wrapper
-sys32_lgetxattr_wrapper:
+ENTRY(sys32_lgetxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
llgfr %r5,%r5 # size_t
jg sys_lgetxattr
- .globl sys32_fgetxattr_wrapper
-sys32_fgetxattr_wrapper:
+ENTRY(sys32_fgetxattr_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # void *
llgfr %r5,%r5 # size_t
jg sys_fgetxattr
- .globl sys32_listxattr_wrapper
-sys32_listxattr_wrapper:
+ENTRY(sys32_listxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys_listxattr
- .globl sys32_llistxattr_wrapper
-sys32_llistxattr_wrapper:
+ENTRY(sys32_llistxattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys_llistxattr
- .globl sys32_flistxattr_wrapper
-sys32_flistxattr_wrapper:
+ENTRY(sys32_flistxattr_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
jg sys_flistxattr
- .globl sys32_removexattr_wrapper
-sys32_removexattr_wrapper:
+ENTRY(sys32_removexattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
jg sys_removexattr
- .globl sys32_lremovexattr_wrapper
-sys32_lremovexattr_wrapper:
+ENTRY(sys32_lremovexattr_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # char *
jg sys_lremovexattr
- .globl sys32_fremovexattr_wrapper
-sys32_fremovexattr_wrapper:
+ENTRY(sys32_fremovexattr_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # char *
jg sys_fremovexattr
- .globl sys32_sched_setaffinity_wrapper
-sys32_sched_setaffinity_wrapper:
+ENTRY(sys32_sched_setaffinity_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned int
llgtr %r4,%r4 # unsigned long *
jg compat_sys_sched_setaffinity
- .globl sys32_sched_getaffinity_wrapper
-sys32_sched_getaffinity_wrapper:
+ENTRY(sys32_sched_getaffinity_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned int
llgtr %r4,%r4 # unsigned long *
jg compat_sys_sched_getaffinity
- .globl sys32_exit_group_wrapper
-sys32_exit_group_wrapper:
+ENTRY(sys32_exit_group_wrapper)
lgfr %r2,%r2 # int
jg sys_exit_group # branch to system call
- .globl sys32_set_tid_address_wrapper
-sys32_set_tid_address_wrapper:
+ENTRY(sys32_set_tid_address_wrapper)
llgtr %r2,%r2 # int *
jg sys_set_tid_address # branch to system call
- .globl sys_epoll_create_wrapper
-sys_epoll_create_wrapper:
+ENTRY(sys_epoll_create_wrapper)
lgfr %r2,%r2 # int
jg sys_epoll_create # branch to system call
- .globl sys_epoll_ctl_wrapper
-sys_epoll_ctl_wrapper:
+ENTRY(sys_epoll_ctl_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
llgtr %r5,%r5 # struct epoll_event *
jg sys_epoll_ctl # branch to system call
- .globl sys_epoll_wait_wrapper
-sys_epoll_wait_wrapper:
+ENTRY(sys_epoll_wait_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct epoll_event *
lgfr %r4,%r4 # int
lgfr %r5,%r5 # int
jg sys_epoll_wait # branch to system call
- .globl sys32_lookup_dcookie_wrapper
-sys32_lookup_dcookie_wrapper:
+ENTRY(sys32_lookup_dcookie_wrapper)
sllg %r2,%r2,32 # get high word of 64bit dcookie
or %r2,%r3 # get low word of 64bit dcookie
llgtr %r3,%r4 # char *
llgfr %r4,%r5 # size_t
jg sys_lookup_dcookie
- .globl sys32_fadvise64_wrapper
-sys32_fadvise64_wrapper:
+ENTRY(sys32_fadvise64_wrapper)
lgfr %r2,%r2 # int
sllg %r3,%r3,32 # get high word of 64bit loff_t
or %r3,%r4 # get low word of 64bit loff_t
@@ -1257,81 +1069,68 @@ sys32_fadvise64_wrapper:
lgfr %r5,%r6 # int
jg sys32_fadvise64
- .globl sys32_fadvise64_64_wrapper
-sys32_fadvise64_64_wrapper:
+ENTRY(sys32_fadvise64_64_wrapper)
llgtr %r2,%r2 # struct fadvise64_64_args *
jg sys32_fadvise64_64
- .globl sys32_clock_settime_wrapper
-sys32_clock_settime_wrapper:
+ENTRY(sys32_clock_settime_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_clock_settime
- .globl sys32_clock_gettime_wrapper
-sys32_clock_gettime_wrapper:
+ENTRY(sys32_clock_gettime_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_clock_gettime
- .globl sys32_clock_getres_wrapper
-sys32_clock_getres_wrapper:
+ENTRY(sys32_clock_getres_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timespec *
jg compat_sys_clock_getres
- .globl sys32_clock_nanosleep_wrapper
-sys32_clock_nanosleep_wrapper:
+ENTRY(sys32_clock_nanosleep_wrapper)
lgfr %r2,%r2 # clockid_t (int)
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct compat_timespec *
llgtr %r5,%r5 # struct compat_timespec *
jg compat_sys_clock_nanosleep
- .globl sys32_timer_create_wrapper
-sys32_timer_create_wrapper:
+ENTRY(sys32_timer_create_wrapper)
lgfr %r2,%r2 # timer_t (int)
llgtr %r3,%r3 # struct compat_sigevent *
llgtr %r4,%r4 # timer_t *
jg compat_sys_timer_create
- .globl sys32_timer_settime_wrapper
-sys32_timer_settime_wrapper:
+ENTRY(sys32_timer_settime_wrapper)
lgfr %r2,%r2 # timer_t (int)
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct compat_itimerspec *
llgtr %r5,%r5 # struct compat_itimerspec *
jg compat_sys_timer_settime
- .globl sys32_timer_gettime_wrapper
-sys32_timer_gettime_wrapper:
+ENTRY(sys32_timer_gettime_wrapper)
lgfr %r2,%r2 # timer_t (int)
llgtr %r3,%r3 # struct compat_itimerspec *
jg compat_sys_timer_gettime
- .globl sys32_timer_getoverrun_wrapper
-sys32_timer_getoverrun_wrapper:
+ENTRY(sys32_timer_getoverrun_wrapper)
lgfr %r2,%r2 # timer_t (int)
jg sys_timer_getoverrun
- .globl sys32_timer_delete_wrapper
-sys32_timer_delete_wrapper:
+ENTRY(sys32_timer_delete_wrapper)
lgfr %r2,%r2 # timer_t (int)
jg sys_timer_delete
- .globl sys32_io_setup_wrapper
-sys32_io_setup_wrapper:
+ENTRY(sys32_io_setup_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # u32 *
jg compat_sys_io_setup
- .globl sys32_io_destroy_wrapper
-sys32_io_destroy_wrapper:
+ENTRY(sys32_io_destroy_wrapper)
llgfr %r2,%r2 # (aio_context_t) u32
jg sys_io_destroy
- .globl sys32_io_getevents_wrapper
-sys32_io_getevents_wrapper:
+ENTRY(sys32_io_getevents_wrapper)
llgfr %r2,%r2 # (aio_context_t) u32
lgfr %r3,%r3 # long
lgfr %r4,%r4 # long
@@ -1339,49 +1138,42 @@ sys32_io_getevents_wrapper:
llgtr %r6,%r6 # struct compat_timespec *
jg compat_sys_io_getevents
- .globl sys32_io_submit_wrapper
-sys32_io_submit_wrapper:
+ENTRY(sys32_io_submit_wrapper)
llgfr %r2,%r2 # (aio_context_t) u32
lgfr %r3,%r3 # long
llgtr %r4,%r4 # struct iocb **
jg compat_sys_io_submit
- .globl sys32_io_cancel_wrapper
-sys32_io_cancel_wrapper:
+ENTRY(sys32_io_cancel_wrapper)
llgfr %r2,%r2 # (aio_context_t) u32
llgtr %r3,%r3 # struct iocb *
llgtr %r4,%r4 # struct io_event *
jg sys_io_cancel
- .globl compat_sys_statfs64_wrapper
-compat_sys_statfs64_wrapper:
+ENTRY(compat_sys_statfs64_wrapper)
llgtr %r2,%r2 # const char *
llgfr %r3,%r3 # compat_size_t
llgtr %r4,%r4 # struct compat_statfs64 *
jg compat_sys_statfs64
- .globl compat_sys_fstatfs64_wrapper
-compat_sys_fstatfs64_wrapper:
+ENTRY(compat_sys_fstatfs64_wrapper)
llgfr %r2,%r2 # unsigned int fd
llgfr %r3,%r3 # compat_size_t
llgtr %r4,%r4 # struct compat_statfs64 *
jg compat_sys_fstatfs64
- .globl compat_sys_mq_open_wrapper
-compat_sys_mq_open_wrapper:
+ENTRY(compat_sys_mq_open_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
llgfr %r4,%r4 # mode_t
llgtr %r5,%r5 # struct compat_mq_attr *
jg compat_sys_mq_open
- .globl sys32_mq_unlink_wrapper
-sys32_mq_unlink_wrapper:
+ENTRY(sys32_mq_unlink_wrapper)
llgtr %r2,%r2 # const char *
jg sys_mq_unlink
- .globl compat_sys_mq_timedsend_wrapper
-compat_sys_mq_timedsend_wrapper:
+ENTRY(compat_sys_mq_timedsend_wrapper)
lgfr %r2,%r2 # mqd_t
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # size_t
@@ -1389,8 +1181,7 @@ compat_sys_mq_timedsend_wrapper:
llgtr %r6,%r6 # const struct compat_timespec *
jg compat_sys_mq_timedsend
- .globl compat_sys_mq_timedreceive_wrapper
-compat_sys_mq_timedreceive_wrapper:
+ENTRY(compat_sys_mq_timedreceive_wrapper)
lgfr %r2,%r2 # mqd_t
llgtr %r3,%r3 # char *
llgfr %r4,%r4 # size_t
@@ -1398,21 +1189,18 @@ compat_sys_mq_timedreceive_wrapper:
llgtr %r6,%r6 # const struct compat_timespec *
jg compat_sys_mq_timedreceive
- .globl compat_sys_mq_notify_wrapper
-compat_sys_mq_notify_wrapper:
+ENTRY(compat_sys_mq_notify_wrapper)
lgfr %r2,%r2 # mqd_t
llgtr %r3,%r3 # struct compat_sigevent *
jg compat_sys_mq_notify
- .globl compat_sys_mq_getsetattr_wrapper
-compat_sys_mq_getsetattr_wrapper:
+ENTRY(compat_sys_mq_getsetattr_wrapper)
lgfr %r2,%r2 # mqd_t
llgtr %r3,%r3 # struct compat_mq_attr *
llgtr %r4,%r4 # struct compat_mq_attr *
jg compat_sys_mq_getsetattr
- .globl compat_sys_add_key_wrapper
-compat_sys_add_key_wrapper:
+ENTRY(compat_sys_add_key_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
llgtr %r4,%r4 # const void *
@@ -1420,16 +1208,14 @@ compat_sys_add_key_wrapper:
llgfr %r6,%r6 # (key_serial_t) u32
jg sys_add_key
- .globl compat_sys_request_key_wrapper
-compat_sys_request_key_wrapper:
+ENTRY(compat_sys_request_key_wrapper)
llgtr %r2,%r2 # const char *
llgtr %r3,%r3 # const char *
llgtr %r4,%r4 # const void *
llgfr %r5,%r5 # (key_serial_t) u32
jg sys_request_key
- .globl sys32_remap_file_pages_wrapper
-sys32_remap_file_pages_wrapper:
+ENTRY(sys32_remap_file_pages_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
llgfr %r4,%r4 # unsigned long
@@ -1437,8 +1223,7 @@ sys32_remap_file_pages_wrapper:
llgfr %r6,%r6 # unsigned long
jg sys_remap_file_pages
- .globl compat_sys_waitid_wrapper
-compat_sys_waitid_wrapper:
+ENTRY(compat_sys_waitid_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # pid_t
llgtr %r4,%r4 # siginfo_emu31_t *
@@ -1446,65 +1231,56 @@ compat_sys_waitid_wrapper:
llgtr %r6,%r6 # struct rusage_emu31 *
jg compat_sys_waitid
- .globl compat_sys_kexec_load_wrapper
-compat_sys_kexec_load_wrapper:
+ENTRY(compat_sys_kexec_load_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
llgtr %r4,%r4 # struct kexec_segment *
llgfr %r5,%r5 # unsigned long
jg compat_sys_kexec_load
- .globl sys_ioprio_set_wrapper
-sys_ioprio_set_wrapper:
+ENTRY(sys_ioprio_set_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
lgfr %r4,%r4 # int
jg sys_ioprio_set
- .globl sys_ioprio_get_wrapper
-sys_ioprio_get_wrapper:
+ENTRY(sys_ioprio_get_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_ioprio_get
- .globl sys_inotify_add_watch_wrapper
-sys_inotify_add_watch_wrapper:
+ENTRY(sys_inotify_add_watch_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # u32
jg sys_inotify_add_watch
- .globl sys_inotify_rm_watch_wrapper
-sys_inotify_rm_watch_wrapper:
+ENTRY(sys_inotify_rm_watch_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # u32
jg sys_inotify_rm_watch
- .globl compat_sys_openat_wrapper
-compat_sys_openat_wrapper:
+ENTRY(compat_sys_openat_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
lgfr %r5,%r5 # int
jg compat_sys_openat
- .globl sys_mkdirat_wrapper
-sys_mkdirat_wrapper:
+ENTRY(sys_mkdirat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
jg sys_mkdirat
- .globl sys_mknodat_wrapper
-sys_mknodat_wrapper:
+ENTRY(sys_mknodat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
llgfr %r5,%r5 # unsigned int
jg sys_mknodat
- .globl sys_fchownat_wrapper
-sys_fchownat_wrapper:
+ENTRY(sys_fchownat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # uid_t
@@ -1512,38 +1288,33 @@ sys_fchownat_wrapper:
lgfr %r6,%r6 # int
jg sys_fchownat
- .globl compat_sys_futimesat_wrapper
-compat_sys_futimesat_wrapper:
+ENTRY(compat_sys_futimesat_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # struct timeval *
jg compat_sys_futimesat
- .globl sys32_fstatat64_wrapper
-sys32_fstatat64_wrapper:
+ENTRY(sys32_fstatat64_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # struct stat64 *
lgfr %r5,%r5 # int
jg sys32_fstatat64
- .globl sys_unlinkat_wrapper
-sys_unlinkat_wrapper:
+ENTRY(sys_unlinkat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
jg sys_unlinkat
- .globl sys_renameat_wrapper
-sys_renameat_wrapper:
+ENTRY(sys_renameat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
llgtr %r5,%r5 # const char *
jg sys_renameat
- .globl sys_linkat_wrapper
-sys_linkat_wrapper:
+ENTRY(sys_linkat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
@@ -1551,37 +1322,32 @@ sys_linkat_wrapper:
lgfr %r6,%r6 # int
jg sys_linkat
- .globl sys_symlinkat_wrapper
-sys_symlinkat_wrapper:
+ENTRY(sys_symlinkat_wrapper)
llgtr %r2,%r2 # const char *
lgfr %r3,%r3 # int
llgtr %r4,%r4 # const char *
jg sys_symlinkat
- .globl sys_readlinkat_wrapper
-sys_readlinkat_wrapper:
+ENTRY(sys_readlinkat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
llgtr %r4,%r4 # char *
lgfr %r5,%r5 # int
jg sys_readlinkat
- .globl sys_fchmodat_wrapper
-sys_fchmodat_wrapper:
+ENTRY(sys_fchmodat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
llgfr %r4,%r4 # mode_t
jg sys_fchmodat
- .globl sys_faccessat_wrapper
-sys_faccessat_wrapper:
+ENTRY(sys_faccessat_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char *
lgfr %r4,%r4 # int
jg sys_faccessat
- .globl compat_sys_pselect6_wrapper
-compat_sys_pselect6_wrapper:
+ENTRY(compat_sys_pselect6_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # fd_set *
llgtr %r4,%r4 # fd_set *
@@ -1591,8 +1357,7 @@ compat_sys_pselect6_wrapper:
stg %r0,160(%r15)
jg compat_sys_pselect6
- .globl compat_sys_ppoll_wrapper
-compat_sys_ppoll_wrapper:
+ENTRY(compat_sys_ppoll_wrapper)
llgtr %r2,%r2 # struct pollfd *
llgfr %r3,%r3 # unsigned int
llgtr %r4,%r4 # struct timespec *
@@ -1600,26 +1365,22 @@ compat_sys_ppoll_wrapper:
llgfr %r6,%r6 # size_t
jg compat_sys_ppoll
- .globl sys_unshare_wrapper
-sys_unshare_wrapper:
+ENTRY(sys_unshare_wrapper)
llgfr %r2,%r2 # unsigned long
jg sys_unshare
- .globl compat_sys_set_robust_list_wrapper
-compat_sys_set_robust_list_wrapper:
+ENTRY(compat_sys_set_robust_list_wrapper)
llgtr %r2,%r2 # struct compat_robust_list_head *
llgfr %r3,%r3 # size_t
jg compat_sys_set_robust_list
- .globl compat_sys_get_robust_list_wrapper
-compat_sys_get_robust_list_wrapper:
+ENTRY(compat_sys_get_robust_list_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_uptr_t *
llgtr %r4,%r4 # compat_size_t *
jg compat_sys_get_robust_list
- .globl sys_splice_wrapper
-sys_splice_wrapper:
+ENTRY(sys_splice_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # loff_t *
lgfr %r4,%r4 # int
@@ -1629,8 +1390,7 @@ sys_splice_wrapper:
stg %r0,160(%r15)
jg sys_splice
- .globl sys_sync_file_range_wrapper
-sys_sync_file_range_wrapper:
+ENTRY(sys_sync_file_range_wrapper)
lgfr %r2,%r2 # int
sllg %r3,%r3,32 # get high word of 64bit loff_t
or %r3,%r4 # get low word of 64bit loff_t
@@ -1639,31 +1399,27 @@ sys_sync_file_range_wrapper:
llgf %r5,164(%r15) # unsigned int
jg sys_sync_file_range
- .globl sys_tee_wrapper
-sys_tee_wrapper:
+ENTRY(sys_tee_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgfr %r4,%r4 # size_t
llgfr %r5,%r5 # unsigned int
jg sys_tee
- .globl compat_sys_vmsplice_wrapper
-compat_sys_vmsplice_wrapper:
+ENTRY(compat_sys_vmsplice_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_iovec *
llgfr %r4,%r4 # unsigned int
llgfr %r5,%r5 # unsigned int
jg compat_sys_vmsplice
- .globl sys_getcpu_wrapper
-sys_getcpu_wrapper:
+ENTRY(sys_getcpu_wrapper)
llgtr %r2,%r2 # unsigned *
llgtr %r3,%r3 # unsigned *
llgtr %r4,%r4 # struct getcpu_cache *
jg sys_getcpu
- .globl compat_sys_epoll_pwait_wrapper
-compat_sys_epoll_pwait_wrapper:
+ENTRY(compat_sys_epoll_pwait_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct compat_epoll_event *
lgfr %r4,%r4 # int
@@ -1673,34 +1429,29 @@ compat_sys_epoll_pwait_wrapper:
stg %r0,160(%r15)
jg compat_sys_epoll_pwait
- .globl compat_sys_utimes_wrapper
-compat_sys_utimes_wrapper:
+ENTRY(compat_sys_utimes_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # struct compat_timeval *
jg compat_sys_utimes
- .globl compat_sys_utimensat_wrapper
-compat_sys_utimensat_wrapper:
+ENTRY(compat_sys_utimensat_wrapper)
llgfr %r2,%r2 # unsigned int
llgtr %r3,%r3 # char *
llgtr %r4,%r4 # struct compat_timespec *
lgfr %r5,%r5 # int
jg compat_sys_utimensat
- .globl compat_sys_signalfd_wrapper
-compat_sys_signalfd_wrapper:
+ENTRY(compat_sys_signalfd_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_sigset_t *
llgfr %r4,%r4 # compat_size_t
jg compat_sys_signalfd
- .globl sys_eventfd_wrapper
-sys_eventfd_wrapper:
+ENTRY(sys_eventfd_wrapper)
llgfr %r2,%r2 # unsigned int
jg sys_eventfd
- .globl sys_fallocate_wrapper
-sys_fallocate_wrapper:
+ENTRY(sys_fallocate_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
sllg %r4,%r4,32 # get high word of 64bit loff_t
@@ -1709,94 +1460,80 @@ sys_fallocate_wrapper:
l %r5,164(%r15) # get low word of 64bit loff_t
jg sys_fallocate
- .globl sys_timerfd_create_wrapper
-sys_timerfd_create_wrapper:
+ENTRY(sys_timerfd_create_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
jg sys_timerfd_create
- .globl compat_sys_timerfd_settime_wrapper
-compat_sys_timerfd_settime_wrapper:
+ENTRY(compat_sys_timerfd_settime_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgtr %r4,%r4 # struct compat_itimerspec *
llgtr %r5,%r5 # struct compat_itimerspec *
jg compat_sys_timerfd_settime
- .globl compat_sys_timerfd_gettime_wrapper
-compat_sys_timerfd_gettime_wrapper:
+ENTRY(compat_sys_timerfd_gettime_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct compat_itimerspec *
jg compat_sys_timerfd_gettime
- .globl compat_sys_signalfd4_wrapper
-compat_sys_signalfd4_wrapper:
+ENTRY(compat_sys_signalfd4_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # compat_sigset_t *
llgfr %r4,%r4 # compat_size_t
lgfr %r5,%r5 # int
jg compat_sys_signalfd4
- .globl sys_eventfd2_wrapper
-sys_eventfd2_wrapper:
+ENTRY(sys_eventfd2_wrapper)
llgfr %r2,%r2 # unsigned int
lgfr %r3,%r3 # int
jg sys_eventfd2
- .globl sys_inotify_init1_wrapper
-sys_inotify_init1_wrapper:
+ENTRY(sys_inotify_init1_wrapper)
lgfr %r2,%r2 # int
jg sys_inotify_init1
- .globl sys_pipe2_wrapper
-sys_pipe2_wrapper:
+ENTRY(sys_pipe2_wrapper)
llgtr %r2,%r2 # u32 *
lgfr %r3,%r3 # int
jg sys_pipe2 # branch to system call
- .globl sys_dup3_wrapper
-sys_dup3_wrapper:
+ENTRY(sys_dup3_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
lgfr %r4,%r4 # int
jg sys_dup3 # branch to system call
- .globl sys_epoll_create1_wrapper
-sys_epoll_create1_wrapper:
+ENTRY(sys_epoll_create1_wrapper)
lgfr %r2,%r2 # int
jg sys_epoll_create1 # branch to system call
- .globl sys32_readahead_wrapper
-sys32_readahead_wrapper:
+ENTRY(sys32_readahead_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # u32
llgfr %r4,%r4 # u32
lgfr %r5,%r5 # s32
jg sys32_readahead # branch to system call
- .globl sys32_sendfile64_wrapper
-sys32_sendfile64_wrapper:
+ENTRY(sys32_sendfile64_wrapper)
lgfr %r2,%r2 # int
lgfr %r3,%r3 # int
llgtr %r4,%r4 # compat_loff_t *
lgfr %r5,%r5 # s32
jg sys32_sendfile64 # branch to system call
- .globl sys_tkill_wrapper
-sys_tkill_wrapper:
+ENTRY(sys_tkill_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # int
jg sys_tkill # branch to system call
- .globl sys_tgkill_wrapper
-sys_tgkill_wrapper:
+ENTRY(sys_tgkill_wrapper)
lgfr %r2,%r2 # pid_t
lgfr %r3,%r3 # pid_t
lgfr %r4,%r4 # int
jg sys_tgkill # branch to system call
- .globl compat_sys_keyctl_wrapper
-compat_sys_keyctl_wrapper:
+ENTRY(compat_sys_keyctl_wrapper)
llgfr %r2,%r2 # u32
llgfr %r3,%r3 # u32
llgfr %r4,%r4 # u32
@@ -1804,8 +1541,7 @@ compat_sys_keyctl_wrapper:
llgfr %r6,%r6 # u32
jg compat_sys_keyctl # branch to system call
- .globl compat_sys_preadv_wrapper
-compat_sys_preadv_wrapper:
+ENTRY(compat_sys_preadv_wrapper)
llgfr %r2,%r2 # unsigned long
llgtr %r3,%r3 # compat_iovec *
llgfr %r4,%r4 # unsigned long
@@ -1813,8 +1549,7 @@ compat_sys_preadv_wrapper:
llgfr %r6,%r6 # u32
jg compat_sys_preadv # branch to system call
- .globl compat_sys_pwritev_wrapper
-compat_sys_pwritev_wrapper:
+ENTRY(compat_sys_pwritev_wrapper)
llgfr %r2,%r2 # unsigned long
llgtr %r3,%r3 # compat_iovec *
llgfr %r4,%r4 # unsigned long
@@ -1822,16 +1557,14 @@ compat_sys_pwritev_wrapper:
llgfr %r6,%r6 # u32
jg compat_sys_pwritev # branch to system call
- .globl compat_sys_rt_tgsigqueueinfo_wrapper
-compat_sys_rt_tgsigqueueinfo_wrapper:
+ENTRY(compat_sys_rt_tgsigqueueinfo_wrapper)
lgfr %r2,%r2 # compat_pid_t
lgfr %r3,%r3 # compat_pid_t
lgfr %r4,%r4 # int
llgtr %r5,%r5 # struct compat_siginfo *
jg compat_sys_rt_tgsigqueueinfo # branch to system call
- .globl sys_perf_event_open_wrapper
-sys_perf_event_open_wrapper:
+ENTRY(sys_perf_event_open_wrapper)
llgtr %r2,%r2 # const struct perf_event_attr *
lgfr %r3,%r3 # pid_t
lgfr %r4,%r4 # int
@@ -1839,29 +1572,25 @@ sys_perf_event_open_wrapper:
llgfr %r6,%r6 # unsigned long
jg sys_perf_event_open # branch to system call
- .globl sys_clone_wrapper
-sys_clone_wrapper:
+ENTRY(sys_clone_wrapper)
llgfr %r2,%r2 # unsigned long
llgfr %r3,%r3 # unsigned long
llgtr %r4,%r4 # int *
llgtr %r5,%r5 # int *
jg sys_clone # branch to system call
- .globl sys32_execve_wrapper
-sys32_execve_wrapper:
+ENTRY(sys32_execve_wrapper)
llgtr %r2,%r2 # char *
llgtr %r3,%r3 # compat_uptr_t *
llgtr %r4,%r4 # compat_uptr_t *
jg sys32_execve # branch to system call
- .globl sys_fanotify_init_wrapper
-sys_fanotify_init_wrapper:
+ENTRY(sys_fanotify_init_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # unsigned int
jg sys_fanotify_init # branch to system call
- .globl sys_fanotify_mark_wrapper
-sys_fanotify_mark_wrapper:
+ENTRY(sys_fanotify_mark_wrapper)
lgfr %r2,%r2 # int
llgfr %r3,%r3 # unsigned int
sllg %r4,%r4,32 # get high word of 64bit mask
@@ -1870,16 +1599,14 @@ sys_fanotify_mark_wrapper:
llgt %r6,164(%r15) # char *
jg sys_fanotify_mark # branch to system call
- .globl sys_prlimit64_wrapper
-sys_prlimit64_wrapper:
+ENTRY(sys_prlimit64_wrapper)
lgfr %r2,%r2 # pid_t
llgfr %r3,%r3 # unsigned int
llgtr %r4,%r4 # const struct rlimit64 __user *
llgtr %r5,%r5 # struct rlimit64 __user *
jg sys_prlimit64 # branch to system call
- .globl sys_name_to_handle_at_wrapper
-sys_name_to_handle_at_wrapper:
+ENTRY(sys_name_to_handle_at_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # const char __user *
llgtr %r4,%r4 # struct file_handle __user *
@@ -1887,21 +1614,18 @@ sys_name_to_handle_at_wrapper:
lgfr %r6,%r6 # int
jg sys_name_to_handle_at
- .globl compat_sys_open_by_handle_at_wrapper
-compat_sys_open_by_handle_at_wrapper:
+ENTRY(compat_sys_open_by_handle_at_wrapper)
lgfr %r2,%r2 # int
llgtr %r3,%r3 # struct file_handle __user *
lgfr %r4,%r4 # int
jg compat_sys_open_by_handle_at
- .globl compat_sys_clock_adjtime_wrapper
-compat_sys_clock_adjtime_wrapper:
+ENTRY(compat_sys_clock_adjtime_wrapper)
lgfr %r2,%r2 # clockid_t (int)
llgtr %r3,%r3 # struct compat_timex __user *
jg compat_sys_clock_adjtime
- .globl sys_syncfs_wrapper
-sys_syncfs_wrapper:
+ENTRY(sys_syncfs_wrapper)
lgfr %r2,%r2 # int
jg sys_syncfs
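Every wrapper above, and the entry points in the files that follow, swap an open-coded ".globl name" / "name:" pair for the ENTRY() macro from <linux/linkage.h>, which is why that header is now included everywhere. As a minimal sketch (the generic definition, assuming no architecture override; the real macro may also use ASM_NL separators), ENTRY() expands to the same global symbol plus an alignment directive:

#ifndef ENTRY
#define ENTRY(name)	\
	.globl name;	\
	ALIGN;		\
	name:
#endif

The conversion is therefore functionally neutral; it only normalizes how entry points are declared and guarantees consistent alignment for them.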
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 0476174dfff5..3eab7cfab07c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -9,8 +9,8 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
@@ -197,8 +197,7 @@ STACK_SIZE = 1 << STACK_SHIFT
* Returns:
* gpr2 = prev
*/
- .globl __switch_to
-__switch_to:
+ENTRY(__switch_to)
basr %r1,0
0: l %r4,__THREAD_info(%r2) # get thread_info of prev
l %r5,__THREAD_info(%r3) # get thread_info of next
@@ -224,8 +223,7 @@ __critical_start:
* are executed with interrupts enabled.
*/
- .globl system_call
-system_call:
+ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
@@ -388,8 +386,7 @@ sysc_tracenogo:
#
# a new process exits the kernel with ret_from_fork
#
- .globl ret_from_fork
-ret_from_fork:
+ENTRY(ret_from_fork)
l %r13,__LC_SVC_NEW_PSW+4
l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
@@ -405,8 +402,7 @@ ret_from_fork:
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
- .globl kernel_execve
-kernel_execve:
+ENTRY(kernel_execve)
stm %r12,%r15,48(%r15)
lr %r14,%r15
l %r13,__LC_SVC_NEW_PSW+4
@@ -438,8 +434,7 @@ kernel_execve:
* Program check handler routine
*/
- .globl pgm_check_handler
-pgm_check_handler:
+ENTRY(pgm_check_handler)
/*
* First we need to check for a special case:
* Single stepping an instruction that disables the PER event mask will
@@ -565,8 +560,7 @@ kernel_per:
* IO interrupt handler routine
*/
- .globl io_int_handler
-io_int_handler:
+ENTRY(io_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -703,8 +697,7 @@ io_notify_resume:
* External interrupt handler routine
*/
- .globl ext_int_handler
-ext_int_handler:
+ENTRY(ext_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -731,8 +724,7 @@ __critical_end:
* Machine check handler routines
*/
- .globl mcck_int_handler
-mcck_int_handler:
+ENTRY(mcck_int_handler)
stck __LC_MCCK_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
@@ -818,8 +810,7 @@ mcck_return:
*/
#ifdef CONFIG_SMP
__CPUINIT
- .globl restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
basr %r1,0
restart_base:
spt restart_vtime-restart_base(%r1)
@@ -848,8 +839,7 @@ restart_vtime:
/*
* If we do not run with SMP enabled, let the new CPU crash ...
*/
- .globl restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
basr %r1,0
restart_base:
lpsw restart_crash-restart_base(%r1)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 17a6f83a2d67..66729eb7bbc5 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,10 +5,9 @@
#include <linux/signal.h>
#include <asm/ptrace.h>
-typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long);
-extern pgm_check_handler_t *pgm_check_table[128];
-pgm_check_handler_t do_protection_exception;
-pgm_check_handler_t do_dat_exception;
+void do_protection_exception(struct pt_regs *, long, unsigned long);
+void do_dat_exception(struct pt_regs *, long, unsigned long);
+void do_asce_exception(struct pt_regs *, long, unsigned long);
extern int sysctl_userprocess_debug;
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index d61967e2eab0..7a0fd426ca92 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -9,8 +9,8 @@
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
@@ -56,15 +56,28 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
_TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
+_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
#define BASED(name) name-system_call(%r13)
+ .macro SPP newpp
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
+ jz .+8
+ .insn s,0xb2800000,\newpp
+#endif
+ .endm
+
.macro HANDLE_SIE_INTERCEPT
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
- lg %r3,__LC_SIE_HOOK
- ltgr %r3,%r3
+ tm __TI_flags+6(%r12),_TIF_SIE>>8
jz 0f
- basr %r14,%r3
+ SPP __LC_CMF_HPP # set host id
+ clc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
+ jl 0f
+ clc SP_PSW+8(8,%r15),BASED(.Lsie_done)
+ jhe 0f
+ mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
0:
#endif
.endm
@@ -206,8 +219,7 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
* Returns:
* gpr2 = prev
*/
- .globl __switch_to
-__switch_to:
+ENTRY(__switch_to)
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
@@ -232,8 +244,7 @@ __critical_start:
* are executed with interrupts enabled.
*/
- .globl system_call
-system_call:
+ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
@@ -395,8 +406,7 @@ sysc_tracenogo:
#
# a new process exits the kernel with ret_from_fork
#
- .globl ret_from_fork
-ret_from_fork:
+ENTRY(ret_from_fork)
lg %r13,__LC_SVC_NEW_PSW+8
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
@@ -411,8 +421,7 @@ ret_from_fork:
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
- .globl kernel_execve
-kernel_execve:
+ENTRY(kernel_execve)
stmg %r12,%r15,96(%r15)
lgr %r14,%r15
aghi %r15,-SP_SIZE
@@ -442,8 +451,7 @@ kernel_execve:
* Program check handler routine
*/
- .globl pgm_check_handler
-pgm_check_handler:
+ENTRY(pgm_check_handler)
/*
* First we need to check for a special case:
* Single stepping an instruction that disables the PER event mask will
@@ -465,6 +473,7 @@ pgm_check_handler:
xc SP_ILC(4,%r15),SP_ILC(%r15)
mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -472,7 +481,6 @@ pgm_check_handler:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
pgm_no_vtime:
- HANDLE_SIE_INTERCEPT
stg %r11,SP_ARGS(%r15)
lgf %r3,__LC_PGM_ILC # load program interruption code
lg %r4,__LC_TRANS_EXC_CODE
@@ -507,6 +515,7 @@ pgm_per_std:
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime2
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -514,7 +523,6 @@ pgm_per_std:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
pgm_no_vtime2:
- HANDLE_SIE_INTERCEPT
lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per
@@ -571,14 +579,14 @@ kernel_per:
/*
* IO interrupt handler routine
*/
- .globl io_int_handler
-io_int_handler:
+ENTRY(io_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
CREATE_STACK_FRAME __LC_SAVE_AREA+40
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz io_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -586,7 +594,6 @@ io_int_handler:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
LAST_BREAK
io_no_vtime:
- HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
@@ -706,14 +713,14 @@ io_notify_resume:
/*
* External interrupt handler routine
*/
- .globl ext_int_handler
-ext_int_handler:
+ENTRY(ext_int_handler)
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
CREATE_STACK_FRAME __LC_SAVE_AREA+40
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz ext_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -721,7 +728,6 @@ ext_int_handler:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
LAST_BREAK
ext_no_vtime:
- HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
lghi %r1,4096
la %r2,SP_PTREGS(%r15) # address of register-save area
@@ -736,8 +742,7 @@ __critical_end:
/*
* Machine check handler routines
*/
- .globl mcck_int_handler
-mcck_int_handler:
+ENTRY(mcck_int_handler)
stck __LC_MCCK_CLOCK
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
@@ -785,6 +790,7 @@ mcck_int_main:
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
jno mcck_no_vtime # no -> no timer update
+ HANDLE_SIE_INTERCEPT
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz mcck_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
@@ -804,7 +810,6 @@ mcck_no_vtime:
stosm __SF_EMPTY(%r15),0x04 # turn dat on
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jno mcck_return
- HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
@@ -823,8 +828,7 @@ mcck_done:
*/
#ifdef CONFIG_SMP
__CPUINIT
- .globl restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
basr %r1,0
restart_base:
spt restart_vtime-restart_base(%r1)
@@ -851,8 +855,7 @@ restart_vtime:
/*
* If we do not run with SMP enabled, let the new CPU crash ...
*/
- .globl restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
basr %r1,0
restart_base:
lpswe restart_crash-restart_base(%r1)
@@ -1036,6 +1039,56 @@ cleanup_io_restore_insn:
.Lcritical_end:
.quad __critical_end
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+/*
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
+ */
+ENTRY(sie64a)
+ stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ stg %r2,__SF_EMPTY(%r15) # save control block pointer
+ stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
+ lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+ lg %r14,__LC_THREAD_INFO # pointer thread_info struct
+ oi __TI_flags+6(%r14),_TIF_SIE>>8
+sie_loop:
+ lg %r14,__LC_THREAD_INFO # pointer thread_info struct
+ tm __TI_flags+7(%r14),_TIF_EXIT_SIE
+ jnz sie_exit
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ SPP __SF_EMPTY(%r15) # set guest id
+ sie 0(%r14)
+sie_done:
+ SPP __LC_CMF_HPP # set host id
+ lg %r14,__LC_THREAD_INFO # pointer thread_info struct
+sie_exit:
+ ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
+ lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lghi %r2,0
+ br %r14
+sie_fault:
+ lg %r14,__LC_THREAD_INFO # pointer thread_info struct
+ ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
+ lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lghi %r2,-EFAULT
+ br %r14
+
+ .align 8
+.Lsie_loop:
+ .quad sie_loop
+.Lsie_done:
+ .quad sie_done
+
+ .section __ex_table,"a"
+ .quad sie_loop,sie_fault
+ .previous
+#endif
+
.section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esame
.globl sys_call_table
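The sie64a block above replaces the old out-of-line SIE hook (arch/s390/kvm/sie64a.S, deleted later in this patch) with an in-kernel entry loop: kernel registers are saved, guest gprs 0-13 are loaded, _TIF_SIE is set, and SIE is re-entered until one of the _TIF_EXIT_SIE bits is pending; a host fault taken inside the sie_loop..sie_done window is redirected to sie_fault through the __ex_table entry and surfaces as -EFAULT. A simplified caller-side sketch, modeled on __vcpu_run() in kvm-s390.c further down (run_guest_once is a hypothetical name; vtime accounting and the local_irq handling are omitted):

/* Sketch only: how the rest of the patch drives the new sie64a(). */
static int run_guest_once(struct kvm_vcpu *vcpu)
{
	int rc;

	gmap_enable(vcpu->arch.gmap);		/* switch to the guest address space */
	rc = sie64a(vcpu->arch.sie_block,	/* 0 on a normal intercept,          */
		    vcpu->arch.guest_gprs);	/* -EFAULT on a host fault in SIE    */
	gmap_disable(vcpu->arch.gmap);
	return rc;
}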
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index fb317bf2c378..2d781bab37bb 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -22,6 +22,7 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
@@ -383,8 +384,7 @@ iplstart:
# doesn't need a builtin ipl record.
#
.org 0x800
- .globl start
-start:
+ENTRY(start)
stm %r0,%r15,0x07b0 # store registers
basr %r12,%r0
.base:
@@ -448,8 +448,7 @@ start:
# or linload or SALIPL
#
.org 0x10000
- .globl startup
-startup:
+ENTRY(startup)
basr %r13,0 # get base
.LPG0:
xc 0x200(256),0x200 # partially clear lowcore
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index b8f8dc126102..f21954b44dc1 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -11,13 +11,13 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
__HEAD
- .globl startup_continue
-startup_continue:
+ENTRY(startup_continue)
basr %r13,0 # get base
.LPG1:
@@ -45,7 +45,7 @@ startup_continue:
# virtual and never return ...
.align 8
.Lentry:.long 0x00080000,0x80000000 + _stext
-.Lctl: .long 0x04b50002 # cr0: various things
+.Lctl: .long 0x04b50000 # cr0: various things
.long 0 # cr1: primary space segment table
.long .Lduct # cr2: dispatchable unit control table
.long 0 # cr3: instruction authorization
@@ -78,8 +78,7 @@ startup_continue:
.Lbase_cc:
.long sched_clock_base_cc
- .globl _ehead
-_ehead:
+ENTRY(_ehead)
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000 - 0x11000 # head.o ends at 0x11000
@@ -88,8 +87,8 @@ _ehead:
#
# startup-code, running in absolute addressing mode
#
- .globl _stext
-_stext: basr %r13,0 # get base
+ENTRY(_stext)
+ basr %r13,0 # get base
.LPG3:
# check control registers
stctl %c0,%c15,0(%r15)
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index cdef68717416..ae5d492b069e 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -11,13 +11,13 @@
*/
#include <linux/init.h>
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
__HEAD
- .globl startup_continue
-startup_continue:
+ENTRY(startup_continue)
larl %r1,sched_clock_base_cc
mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
larl %r13,.LPG1 # get base
@@ -46,7 +46,7 @@ startup_continue:
.align 16
.LPG1:
.Lentry:.quad 0x0000000180000000,_stext
-.Lctl: .quad 0x04350002 # cr0: various things
+.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
.quad 0 # cr1: primary space segment table
.quad .Lduct # cr2: dispatchable unit control table
.quad 0 # cr3: instruction authorization
@@ -76,8 +76,7 @@ startup_continue:
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
- .globl _ehead
-_ehead:
+ENTRY(_ehead)
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000 - 0x11000 # head.o ends at 0x11000
@@ -86,8 +85,8 @@ _ehead:
#
# startup-code, running in absolute addressing mode
#
- .globl _stext
-_stext: basr %r13,0 # get base
+ENTRY(_stext)
+ basr %r13,0 # get base
.LPG3:
# check control registers
stctg %c0,%c15,0(%r15)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e3264f6a9720..1f4050d45f78 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -88,15 +88,6 @@ int show_interrupts(struct seq_file *p, void *v)
}
/*
- * For compatibilty only. S/390 specific setup of interrupts et al. is done
- * much later in init_channel_subsystem().
- */
-void __init init_IRQ(void)
-{
- /* nothing... */
-}
-
-/*
* Switch to the asynchronous interrupt stack for softirq execution.
*/
asmlinkage void do_softirq(void)
@@ -144,28 +135,45 @@ void init_irq_proc(void)
#endif
/*
- * ext_int_hash[index] is the start of the list for all external interrupts
- * that hash to this index. With the current set of external interrupts
- * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
- * iucv and 0x2603 pfault) this is always the first element.
+ * ext_int_hash[index] is the list head for all external interrupts that hash
+ * to this index.
*/
+static struct list_head ext_int_hash[256];
struct ext_int_info {
- struct ext_int_info *next;
ext_int_handler_t handler;
u16 code;
+ struct list_head entry;
+ struct rcu_head rcu;
};
-static struct ext_int_info *ext_int_hash[256];
+/* ext_int_hash_lock protects the handler lists for external interrupts */
+DEFINE_SPINLOCK(ext_int_hash_lock);
+
+static void __init init_external_interrupts(void)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
+ INIT_LIST_HEAD(&ext_int_hash[idx]);
+}
static inline int ext_hash(u16 code)
{
return (code + (code >> 9)) & 0xff;
}
+static void ext_int_hash_update(struct rcu_head *head)
+{
+ struct ext_int_info *p = container_of(head, struct ext_int_info, rcu);
+
+ kfree(p);
+}
+
int register_external_interrupt(u16 code, ext_int_handler_t handler)
{
struct ext_int_info *p;
+ unsigned long flags;
int index;
p = kmalloc(sizeof(*p), GFP_ATOMIC);
@@ -174,33 +182,27 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler)
p->code = code;
p->handler = handler;
index = ext_hash(code);
- p->next = ext_int_hash[index];
- ext_int_hash[index] = p;
+
+ spin_lock_irqsave(&ext_int_hash_lock, flags);
+ list_add_rcu(&p->entry, &ext_int_hash[index]);
+ spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
EXPORT_SYMBOL(register_external_interrupt);
int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
{
- struct ext_int_info *p, *q;
- int index;
+ struct ext_int_info *p;
+ unsigned long flags;
+ int index = ext_hash(code);
- index = ext_hash(code);
- q = NULL;
- p = ext_int_hash[index];
- while (p) {
- if (p->code == code && p->handler == handler)
- break;
- q = p;
- p = p->next;
- }
- if (!p)
- return -ENOENT;
- if (q)
- q->next = p->next;
- else
- ext_int_hash[index] = p->next;
- kfree(p);
+ spin_lock_irqsave(&ext_int_hash_lock, flags);
+ list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
+ if (p->code == code && p->handler == handler) {
+ list_del_rcu(&p->entry);
+ call_rcu(&p->rcu, ext_int_hash_update);
+ }
+ spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
EXPORT_SYMBOL(unregister_external_interrupt);
@@ -224,15 +226,22 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
if (code != 0x1004)
__get_cpu_var(s390_idle).nohz_delay = 1;
+
index = ext_hash(code);
- for (p = ext_int_hash[index]; p; p = p->next) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
if (likely(p->code == code))
p->handler(ext_int_code, param32, param64);
- }
+ rcu_read_unlock();
irq_exit();
set_irq_regs(old_regs);
}
+void __init init_IRQ(void)
+{
+ init_external_interrupts();
+}
+
static DEFINE_SPINLOCK(sc_irq_lock);
static int sc_irq_refcount;
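The irq.c hunk converts the external-interrupt hash from hand-rolled singly linked chains to list_heads protected by ext_int_hash_lock for writers and by RCU on the do_extint() fast path; entries are freed only after a grace period via call_rcu(). A minimal usage sketch, assuming ext_int_handler_t matches the three arguments do_extint() passes (the 0x1234 code and my_ext_handler are invented for illustration):

/* Sketch only: registering a handler on the RCU-protected hash. */
static void my_ext_handler(unsigned int ext_int_code,
			   unsigned int param32, unsigned long param64)
{
	/* called from do_extint() under rcu_read_lock(), interrupts off */
}

static int __init my_driver_init(void)
{
	return register_external_interrupt(0x1234, my_ext_handler);
}

static void __exit my_driver_exit(void)
{
	unregister_external_interrupt(0x1234, my_ext_handler);
	/* the matching ext_int_info is freed after an RCU grace period */
}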
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 1e6a55795628..7e2c38ba1373 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -5,21 +5,19 @@
*
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
.section .kprobes.text, "ax"
- .globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
br %r14
- .globl _mcount
-_mcount:
+ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
- .globl ftrace_caller
-ftrace_caller:
+ENTRY(ftrace_caller)
#endif
stm %r2,%r5,16(%r15)
bras %r1,2f
@@ -41,8 +39,7 @@ ftrace_caller:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
l %r2,100(%r15)
l %r3,152(%r15)
- .globl ftrace_graph_caller
-ftrace_graph_caller:
+ENTRY(ftrace_graph_caller)
# The bras instruction gets runtime patched to call prepare_ftrace_return.
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
# bras %r14,prepare_ftrace_return
@@ -56,8 +53,7 @@ ftrace_graph_caller:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl return_to_handler
-return_to_handler:
+ENTRY(return_to_handler)
stm %r2,%r5,16(%r15)
st %r14,56(%r15)
lr %r0,%r15
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index e73667286ac0..f70cadec68fc 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -5,21 +5,19 @@
*
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
.section .kprobes.text, "ax"
- .globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
br %r14
- .globl _mcount
-_mcount:
+ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
br %r14
- .globl ftrace_caller
-ftrace_caller:
+ENTRY(ftrace_caller)
#endif
larl %r1,function_trace_stop
icm %r1,0xf,0(%r1)
@@ -37,8 +35,7 @@ ftrace_caller:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
lg %r2,168(%r15)
lg %r3,272(%r15)
- .globl ftrace_graph_caller
-ftrace_graph_caller:
+ENTRY(ftrace_graph_caller)
# The bras instruction gets runtime patched to call prepare_ftrace_return.
# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
# bras %r14,prepare_ftrace_return
@@ -52,8 +49,7 @@ ftrace_graph_caller:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl return_to_handler
-return_to_handler:
+ENTRY(return_to_handler)
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
aghi %r15,-160
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index f7167ee4604c..dfcb3436bad0 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -45,13 +45,6 @@
#define PLT_ENTRY_SIZE 20
#endif /* CONFIG_64BIT */
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
@@ -176,15 +169,6 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
return 0;
}
-int
-apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
- unsigned int relsec, struct module *me)
-{
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
static int
apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
struct module *me)
@@ -409,7 +393,3 @@ int module_finalize(const Elf_Ehdr *hdr,
me->arch.syminfo = NULL;
return 0;
}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index cb899d9f8505..303d961c3bb5 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,14 +6,15 @@
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#
# do_reipl_asm
# Parameter: r2 = schid of reipl device
#
- .globl do_reipl_asm
-do_reipl_asm: basr %r13,0
+ENTRY(do_reipl_asm)
+ basr %r13,0
.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
.Lpg1: # do store status of all registers
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 9eabbc90795d..78eb7cfbd3d1 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -4,6 +4,7 @@
* Denis Joseph Barrow,
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#
@@ -11,8 +12,8 @@
# Parameter: r2 = schid of reipl device
#
- .globl do_reipl_asm
-do_reipl_asm: basr %r13,0
+ENTRY(do_reipl_asm)
+ basr %r13,0
.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
.Lpg1: # do store status of all registers
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index 3b456b80bcee..c91d70aede91 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -8,6 +8,8 @@
*
*/
+#include <linux/linkage.h>
+
/*
* moves the new kernel to its destination...
* %r2 = pointer to first kimage_entry_t
@@ -22,8 +24,7 @@
*/
.text
- .globl relocate_kernel
- relocate_kernel:
+ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
stnsm sys_msk-.base(%r13),0xfb # disable DAT
@@ -112,6 +113,7 @@
.byte 0
.align 8
relocate_kernel_end:
+ .align 8
.globl relocate_kernel_len
relocate_kernel_len:
.quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 1f9ea2067b59..7c3ce589a7f0 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -8,6 +8,8 @@
*
*/
+#include <linux/linkage.h>
+
/*
* moves the new kernel to its destination...
* %r2 = pointer to first kimage_entry_t
@@ -23,8 +25,7 @@
*/
.text
- .globl relocate_kernel
- relocate_kernel:
+ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
stnsm sys_msk-.base(%r13),0xfb # disable DAT
@@ -115,6 +116,7 @@
.byte 0
.align 8
relocate_kernel_end:
+ .align 8
.globl relocate_kernel_len
relocate_kernel_len:
.quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 656fcbb9bd83..57b536649b00 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -1,6 +1,10 @@
#include <linux/module.h>
+#include <linux/kvm_host.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+EXPORT_SYMBOL(sie64a);
+#endif
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 2e82fdd89320..95792d846bb6 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -8,6 +8,8 @@
*
*/
+#include <linux/linkage.h>
+
LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter
@@ -260,8 +262,7 @@ _sclp_print:
# R2 = 0 on success, 1 on failure
#
- .globl _sclp_print_early
-_sclp_print_early:
+ENTRY(_sclp_print_early)
stm %r6,%r15,24(%r15) # save registers
ahi %r15,-96 # create stack frame
#ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1d55c95f617c..a6d85c0a7f20 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -654,7 +654,8 @@ int __cpu_disable(void)
/* disable all external interrupts */
cr_parms.orvals[0] = 0;
cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
- 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4);
+ 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
+ 1 << 4);
/* disable all I/O interrupts */
cr_parms.orvals[6] = 0;
cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
index 20530dd2eab1..bfe070bc7659 100644
--- a/arch/s390/kernel/switch_cpu.S
+++ b/arch/s390/kernel/switch_cpu.S
@@ -5,6 +5,7 @@
*
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
@@ -16,9 +17,7 @@
# %r6 - destination cpu
.section .text
- .align 4
- .globl smp_switch_to_cpu
-smp_switch_to_cpu:
+ENTRY(smp_switch_to_cpu)
stm %r6,%r15,__SF_GPRS(%r15)
lr %r1,%r15
ahi %r15,-STACK_FRAME_OVERHEAD
@@ -33,8 +32,7 @@ smp_switch_to_cpu:
brc 2,2b /* busy, try again */
3: j 3b
- .globl smp_restart_cpu
-smp_restart_cpu:
+ENTRY(smp_restart_cpu)
basr %r13,0
0: la %r1,.gprregs_addr-0b(%r13)
l %r1,0(%r1)
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
index 5be3f43898f9..fcc42d799e41 100644
--- a/arch/s390/kernel/switch_cpu64.S
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -5,6 +5,7 @@
*
*/
+#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
@@ -16,9 +17,7 @@
# %r6 - destination cpu
.section .text
- .align 4
- .globl smp_switch_to_cpu
-smp_switch_to_cpu:
+ENTRY(smp_switch_to_cpu)
stmg %r6,%r15,__SF_GPRS(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
@@ -31,8 +30,7 @@ smp_switch_to_cpu:
brc 2,2b /* busy, try again */
3: j 3b
- .globl smp_restart_cpu
-smp_restart_cpu:
+ENTRY(smp_restart_cpu)
larl %r1,.gprregs
lmg %r0,%r15,0(%r1)
1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 1f066e46e83e..51bcdb50a230 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -7,6 +7,7 @@
* Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
+#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -22,9 +23,7 @@
* This function runs with disabled interrupts.
*/
.section .text
- .align 4
- .globl swsusp_arch_suspend
-swsusp_arch_suspend:
+ENTRY(swsusp_arch_suspend)
stmg %r6,%r15,__SF_GPRS(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
@@ -112,8 +111,7 @@ swsusp_arch_suspend:
* Then we return to the function that called swsusp_arch_suspend().
* swsusp_arch_resume() runs with disabled interrupts.
*/
- .globl swsusp_arch_resume
-swsusp_arch_resume:
+ENTRY(swsusp_arch_resume)
stmg %r6,%r15,__SF_GPRS(%r15)
lgr %r1,%r15
aghi %r15,-STACK_FRAME_OVERHEAD
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a63d34c3611e..e9372c77cced 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/tracehook.h>
+#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -43,14 +43,10 @@
#include <asm/debug.h>
#include "entry.h"
-pgm_check_handler_t *pgm_check_table[128];
+void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
int show_unhandled_signals;
-extern pgm_check_handler_t do_protection_exception;
-extern pgm_check_handler_t do_dat_exception;
-extern pgm_check_handler_t do_asce_exception;
-
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
#ifndef CONFIG_64BIT
@@ -329,10 +325,17 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
void __kprobes do_per_trap(struct pt_regs *regs)
{
+ siginfo_t info;
+
if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
return;
- if (current->ptrace)
- force_sig(SIGTRAP, current);
+ if (!current->ptrace)
+ return;
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_HWBKPT;
+ info.si_addr = (void *) current->thread.per_event.address;
+ force_sig_info(SIGTRAP, &info, current);
}
static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
@@ -425,9 +428,13 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
- if (current->ptrace)
- force_sig(SIGTRAP, current);
- else
+ if (current->ptrace) {
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ info.si_addr = location;
+ force_sig_info(SIGTRAP, &info, current);
+ } else
signal = SIGILL;
#ifdef CONFIG_MATHEMU
} else if (opcode[0] == 0xb3) {
@@ -489,9 +496,8 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
#ifdef CONFIG_MATHEMU
-asmlinkage void specification_exception(struct pt_regs *regs,
- long pgm_int_code,
- unsigned long trans_exc_code)
+void specification_exception(struct pt_regs *regs, long pgm_int_code,
+ unsigned long trans_exc_code)
{
__u8 opcode[6];
__u16 __user *location = NULL;
@@ -648,7 +654,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
}
-asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
+void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
bust_spinlocks(1);
printk("Kernel stack overflow.\n");
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index f66a1bdbb61d..a21634173a66 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -37,6 +37,5 @@ config KVM
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 860d26514c08..3975722bb19d 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -10,5 +10,5 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
-kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o
+kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 03c716a0f01f..c86f6ae43f76 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,5 +1,5 @@
/*
- * gaccess.h - access guest memory
+ * access.h - access guest memory
*
* Copyright IBM Corp. 2008,2009
*
@@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
unsigned long guestaddr)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if (guestaddr < 2 * PAGE_SIZE)
guestaddr += prefix;
else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
guestaddr -= prefix;
- if (guestaddr > memsize)
- return (void __user __force *) ERR_PTR(-EFAULT);
-
- guestaddr += origin;
-
- return (void __user *) guestaddr;
+ return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
@@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
unsigned long guestdest,
- const void *from, unsigned long n)
+ void *from, unsigned long n)
{
int rc;
unsigned long i;
- const u8 *data = from;
+ u8 *data = from;
for (i = 0; i < n; i++) {
rc = put_guest_u8(vcpu, guestdest++, *(data++));
@@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
return 0;
}
+static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
+ unsigned long guestdest,
+ void *from, unsigned long n)
+{
+ int r;
+ void __user *uptr;
+ unsigned long size;
+
+ if (guestdest + n < guestdest)
+ return -EFAULT;
+
+ /* simple case: all within one segment table entry? */
+ if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
+ uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ r = copy_to_user(uptr, from, n);
+
+ if (r)
+ r = -EFAULT;
+
+ goto out;
+ }
+
+ /* copy first segment */
+ uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ size = PMD_SIZE - (guestdest & ~PMD_MASK);
+
+ r = copy_to_user(uptr, from, size);
+
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+ from += size;
+ n -= size;
+ guestdest += size;
+
+ /* copy full segments */
+ while (n >= PMD_SIZE) {
+ uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ r = copy_to_user(uptr, from, PMD_SIZE);
+
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+ from += PMD_SIZE;
+ n -= PMD_SIZE;
+ guestdest += PMD_SIZE;
+ }
+
+ /* copy the tail segment */
+ if (n) {
+ uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ r = copy_to_user(uptr, from, n);
+
+ if (r)
+ r = -EFAULT;
+ }
+out:
+ return r;
+}
+
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
+ unsigned long guestdest,
+ void *from, unsigned long n)
+{
+ return __copy_to_guest_fast(vcpu, guestdest, from, n);
+}
+
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
- const void *from, unsigned long n)
+ void *from, unsigned long n)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
goto slowpath;
@@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
guestdest -= prefix;
- if (guestdest + n > memsize)
- return -EFAULT;
-
- if (guestdest + n < guestdest)
- return -EFAULT;
-
- guestdest += origin;
-
- return copy_to_user((void __user *) guestdest, from, n);
+ return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
@@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
return 0;
}
-static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
- unsigned long guestsrc, unsigned long n)
+static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
+ unsigned long guestsrc,
+ unsigned long n)
{
- unsigned long prefix = vcpu->arch.sie_block->prefix;
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+ int r;
+ void __user *uptr;
+ unsigned long size;
- if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
- goto slowpath;
+ if (guestsrc + n < guestsrc)
+ return -EFAULT;
- if ((guestsrc < prefix) && (guestsrc + n > prefix))
- goto slowpath;
+ /* simple case: all within one segment table entry? */
+ if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
+ uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
- if ((guestsrc < prefix + 2 * PAGE_SIZE)
- && (guestsrc + n > prefix + 2 * PAGE_SIZE))
- goto slowpath;
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
- if (guestsrc < 2 * PAGE_SIZE)
- guestsrc += prefix;
- else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
- guestsrc -= prefix;
+ r = copy_from_user(to, uptr, n);
- if (guestsrc + n > memsize)
- return -EFAULT;
+ if (r)
+ r = -EFAULT;
- if (guestsrc + n < guestsrc)
- return -EFAULT;
+ goto out;
+ }
- guestsrc += origin;
+ /* copy first segment */
+ uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
- return copy_from_user(to, (void __user *) guestsrc, n);
-slowpath:
- return __copy_from_guest_slow(vcpu, to, guestsrc, n);
-}
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
-static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
- unsigned long guestdest,
- const void *from, unsigned long n)
-{
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+ size = PMD_SIZE - (guestsrc & ~PMD_MASK);
- if (guestdest + n > memsize)
- return -EFAULT;
+ r = copy_from_user(to, uptr, size);
- if (guestdest + n < guestdest)
- return -EFAULT;
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+ to += size;
+ n -= size;
+ guestsrc += size;
+
+ /* copy full segments */
+ while (n >= PMD_SIZE) {
+ uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
+
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
+
+ r = copy_from_user(to, uptr, PMD_SIZE);
+
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+ to += PMD_SIZE;
+ n -= PMD_SIZE;
+ guestsrc += PMD_SIZE;
+ }
+
+ /* copy the tail segment */
+ if (n) {
+ uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
- guestdest += origin;
+ if (IS_ERR((void __force *) uptr))
+ return PTR_ERR((void __force *) uptr);
- return copy_to_user((void __user *) guestdest, from, n);
+ r = copy_from_user(to, uptr, n);
+
+ if (r)
+ r = -EFAULT;
+ }
+out:
+ return r;
}
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc,
unsigned long n)
{
- unsigned long origin = vcpu->arch.sie_block->gmsor;
- unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
+ return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+}
- if (guestsrc + n > memsize)
- return -EFAULT;
+static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
+ unsigned long guestsrc, unsigned long n)
+{
+ unsigned long prefix = vcpu->arch.sie_block->prefix;
- if (guestsrc + n < guestsrc)
- return -EFAULT;
+ if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
+ goto slowpath;
- guestsrc += origin;
+ if ((guestsrc < prefix) && (guestsrc + n > prefix))
+ goto slowpath;
+
+ if ((guestsrc < prefix + 2 * PAGE_SIZE)
+ && (guestsrc + n > prefix + 2 * PAGE_SIZE))
+ goto slowpath;
+
+ if (guestsrc < 2 * PAGE_SIZE)
+ guestsrc += prefix;
+ else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
+ guestsrc -= prefix;
- return copy_from_user(to, (void __user *) guestsrc, n);
+ return __copy_from_guest_fast(vcpu, to, guestsrc, n);
+slowpath:
+ return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif
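With the flat gmsor/gmslm window gone, gaccess.h now translates guest addresses through gmap_fault() one segment at a time, because consecutive 1 MB segments (PMD_SIZE) of guest memory may map to unrelated host addresses. The unrolled first/middle/tail handling above boils down to the loop below (copy_to_guest_sketch is a hypothetical name; the kernel code keeps the three cases separate and handles the prefix pages elsewhere):

/* Sketch only: segment-wise guest copy equivalent to __copy_to_guest_fast(). */
static int copy_to_guest_sketch(struct kvm_vcpu *vcpu, unsigned long gdest,
				void *from, unsigned long n)
{
	while (n) {
		unsigned long chunk = min(n, PMD_SIZE - (gdest & ~PMD_MASK));
		void __user *uptr =
			(void __user *) gmap_fault(gdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		if (copy_to_user(uptr, from, chunk))
			return -EFAULT;
		from += chunk;
		gdest += chunk;
		n -= chunk;
	}
	return 0;
}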
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index f7b6df45d8be..c7c51898984e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -105,6 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
[0xb7] = handle_lctl,
+ [0xe5] = kvm_s390_handle_e5,
[0xeb] = handle_lctlg,
};
@@ -159,22 +160,42 @@ static int handle_stop(struct kvm_vcpu *vcpu)
static int handle_validity(struct kvm_vcpu *vcpu)
{
+ unsigned long vmaddr;
int viwhy = vcpu->arch.sie_block->ipb >> 16;
int rc;
vcpu->stat.exit_validity++;
- if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
- <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
- rc = fault_in_pages_writeable((char __user *)
- vcpu->arch.sie_block->gmsor +
- vcpu->arch.sie_block->prefix,
- 2*PAGE_SIZE);
- if (rc)
+ if (viwhy == 0x37) {
+ vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
+ vcpu->arch.gmap);
+ if (IS_ERR_VALUE(vmaddr)) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ rc = fault_in_pages_writeable((char __user *) vmaddr,
+ PAGE_SIZE);
+ if (rc) {
+ /* user will receive sigsegv, exit to user */
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
+ vcpu->arch.gmap);
+ if (IS_ERR_VALUE(vmaddr)) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ rc = fault_in_pages_writeable((char __user *) vmaddr,
+ PAGE_SIZE);
+ if (rc) {
/* user will receive sigsegv, exit to user */
rc = -EOPNOTSUPP;
+ goto out;
+ }
} else
rc = -EOPNOTSUPP;
+out:
if (rc)
VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
viwhy);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 35c21bf910c5..c9aeb4b4d0b8 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -128,6 +128,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
if (rc == -EFAULT)
exception = 1;
+ rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
+ if (rc == -EFAULT)
+ exception = 1;
+
rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
if (rc == -EFAULT)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 67345ae7ce8d..f17296e4fc89 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
+ { "instruction_tprot", VCPU_STAT(instruction_tprot) },
{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -189,7 +190,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created");
+ kvm->arch.gmap = gmap_alloc(current->mm);
+ if (!kvm->arch.gmap)
+ goto out_nogmap;
+
return 0;
+out_nogmap:
+ debug_unregister(kvm->arch.dbf);
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
out_err:
@@ -234,11 +241,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_free_vcpus(kvm);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
+ gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.gmap = vcpu->kvm->arch.gmap;
return 0;
}
@@ -284,8 +293,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
- set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
+ atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
vcpu->arch.sie_block->ecb = 6;
vcpu->arch.sie_block->eca = 0xC1002001U;
vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -453,6 +461,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
local_irq_disable();
kvm_guest_enter();
local_irq_enable();
+ gmap_enable(vcpu->arch.gmap);
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -461,6 +470,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
+ gmap_disable(vcpu->arch.gmap);
local_irq_disable();
kvm_guest_exit();
local_irq_enable();
@@ -474,17 +484,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
sigset_t sigsaved;
rerun_vcpu:
- if (vcpu->requests)
- if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
- kvm_s390_vcpu_set_mem(vcpu);
-
- /* verify, that memory has been registered */
- if (!vcpu->arch.sie_block->gmslm) {
- vcpu_put(vcpu);
- VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
- return -EINVAL;
- }
-
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -545,7 +544,7 @@ rerun_vcpu:
return rc;
}
-static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
+static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
unsigned long n, int prefix)
{
if (prefix)
@@ -562,7 +561,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
*/
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
- const unsigned char archmode = 1;
+ unsigned char archmode = 1;
int prefix;
if (addr == KVM_S390_STORE_STATUS_NOADDR) {
@@ -680,10 +679,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr)
return -EINVAL;
- if (mem->userspace_addr & (PAGE_SIZE - 1))
+ if (mem->userspace_addr & 0xffffful)
return -EINVAL;
- if (mem->memory_size & (PAGE_SIZE - 1))
+ if (mem->memory_size & 0xffffful)
return -EINVAL;
if (!user_alloc)
@@ -697,15 +696,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot old,
int user_alloc)
{
- int i;
- struct kvm_vcpu *vcpu;
+ int rc;
- /* request update of sie control block for all available vcpus */
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
- continue;
- kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
- }
+
+ rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+ mem->guest_phys_addr, mem->memory_size);
+ if (rc)
+ printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
+ return;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
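kvm-s390.c is where the new per-VM gmap is wired up: allocated in kvm_arch_init_vm(), populated from the memory slot in kvm_arch_commit_memory_region() via gmap_map_segment(), switched on and off around every SIE entry in __vcpu_run(), and released in kvm_arch_destroy_vm(). The lifecycle, compressed into one hypothetical helper for illustration (the real callers are spread across the functions just named and carry more error handling):

/* Sketch only: the gmap lifecycle introduced by this patch. */
static int gmap_lifecycle_sketch(struct kvm_userspace_memory_region *mem)
{
	struct gmap *g;

	g = gmap_alloc(current->mm);			/* kvm_arch_init_vm()          */
	if (!g)
		return -ENOMEM;
	gmap_map_segment(g, mem->userspace_addr,	/* commit_memory_region():     */
			 mem->guest_phys_addr,		/* back guest "physical" range */
			 mem->memory_size);		/* with the user mapping       */
	gmap_enable(g);					/* around each sie64a() call   */
	gmap_disable(g);
	gmap_free(g);					/* kvm_arch_destroy_vm()       */
	return 0;
}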
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a7b7586626db..99b0b7597115 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -58,35 +58,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
-static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.sie_block->gmslm
- - vcpu->arch.sie_block->gmsor
- - VIRTIODESCSPACE + 1ul;
-}
-
-static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
-{
- int idx;
- struct kvm_memory_slot *mem;
- struct kvm_memslots *memslots;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- memslots = kvm_memslots(vcpu->kvm);
-
- mem = &memslots->memslots[0];
-
- vcpu->arch.sie_block->gmsor = mem->userspace_addr;
- vcpu->arch.sie_block->gmslm =
- mem->userspace_addr +
- (mem->npages << PAGE_SHIFT) +
- VIRTIODESCSPACE - 1ul;
-
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
/* implemented in priv.c */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 73c47bd95db3..391626361084 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -326,3 +326,52 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
}
return -EOPNOTSUPP;
}
+
+static int handle_tprot(struct kvm_vcpu *vcpu)
+{
+ int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
+ int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
+ int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
+ int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
+ u64 address1 = disp1 + (base1 ? vcpu->arch.guest_gprs[base1] : 0);
+ u64 address2 = disp2 + (base2 ? vcpu->arch.guest_gprs[base2] : 0);
+ struct vm_area_struct *vma;
+
+ vcpu->stat.instruction_tprot++;
+
+ /* we only handle the Linux memory detection case:
+ * access key == 0
+ * guest DAT == off
+ * everything else goes to userspace. */
+ if (address2 & 0xf0)
+ return -EOPNOTSUPP;
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
+ return -EOPNOTSUPP;
+
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,
+ (unsigned long) __guestaddr_to_user(vcpu, address1));
+ if (!vma) {
+ up_read(&current->mm->mmap_sem);
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ }
+
+ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
+ vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
+ if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
+ vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);
+
+ up_read(&current->mm->mmap_sem);
+ return 0;
+}
+
+int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
+{
+ /* For e5xx... instructions we only handle TPROT */
+ if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
+ return handle_tprot(vcpu);
+ return -EOPNOTSUPP;
+}
+
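handle_tprot() only covers the memory-detection pattern Linux guests use (access key 0, DAT off) and reports its verdict through the guest condition code, which for this PSW layout occupies bits 44-45 of gpsw.mask. A small sketch of that last step (set_guest_cc is a hypothetical helper; cc 0 means fetch and store allowed, cc 1 fetch only, cc 2 no access):

/* Sketch only: how handle_tprot() encodes the TPROT condition code. */
static void set_guest_cc(struct kvm_vcpu *vcpu, unsigned int cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);	/* clear cc */
	vcpu->arch.sie_block->gpsw.mask |= (unsigned long) cc << 44;
}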
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
deleted file mode 100644
index 5faa1b1b23fa..000000000000
--- a/arch/s390/kvm/sie64a.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * sie64a.S - low level sie call
- *
- * Copyright IBM Corp. 2008,2010
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
- */
-
-#include <linux/errno.h>
-#include <asm/asm-offsets.h>
-#include <asm/setup.h>
-#include <asm/asm-offsets.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
-_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
-
-/*
- * offsets into stackframe
- * SP_ = offsets into stack sie64 is called with
- * SPI_ = offsets into irq stack
- */
-SP_GREGS = __SF_EMPTY
-SP_HOOK = __SF_EMPTY+8
-SP_GPP = __SF_EMPTY+16
-SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
-
-
- .macro SPP newpp
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
- jz 0f
- .insn s,0xb2800000,\newpp
-0:
- .endm
-
-sie_irq_handler:
- SPP __LC_CMF_HPP # set host id
- larl %r2,sie_inst
- clg %r2,SPI_PSW+8(0,%r15) # intercepted sie
- jne 1f
- xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
- lg %r2,__LC_THREAD_INFO # pointer thread_info struct
- tm __TI_flags+7(%r2),_TIF_EXIT_SIE
- jz 0f
- larl %r2,sie_exit # work pending, leave sie
- stg %r2,SPI_PSW+8(0,%r15)
- br %r14
-0: larl %r2,sie_reenter # re-enter with guest id
- stg %r2,SPI_PSW+8(0,%r15)
-1: br %r14
-
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
- .globl sie64a
-sie64a:
- stg %r3,SP_GREGS(%r15) # save guest register save area
- stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry
- lgr %r14,%r2 # pointer to sie control block
- larl %r5,sie_irq_handler
- stg %r2,SP_GPP(%r15)
- stg %r5,SP_HOOK(%r15) # save hook target
- lmg %r0,%r13,0(%r3) # load guest gprs 0-13
-sie_reenter:
- mvc __LC_SIE_HOOK(8),SP_HOOK(%r15)
- SPP SP_GPP(%r15) # set guest id
-sie_inst:
- sie 0(%r14)
- xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
- SPP __LC_CMF_HPP # set host id
-sie_exit:
- lg %r14,SP_GREGS(%r15)
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
- lghi %r2,0
- lmg %r6,%r14,__SF_GPRS(%r15)
- br %r14
-
-sie_err:
- xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
- SPP __LC_CMF_HPP # set host id
- lg %r14,SP_GREGS(%r15)
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
- lghi %r2,-EFAULT
- lmg %r6,%r14,__SF_GPRS(%r15)
- br %r14
-
- .section __ex_table,"a"
- .quad sie_inst,sie_err
- .quad sie_exit,sie_err
- .quad sie_reenter,sie_err
- .previous
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 702276f5e2fa..d6a50c1fb2e6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -189,10 +189,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
/* make sure that the new value is valid memory */
address = address & 0x7fffe000u;
- if ((copy_from_user(&tmp, (void __user *)
- (address + vcpu->arch.sie_block->gmsor) , 1)) ||
- (copy_from_user(&tmp, (void __user *)(address +
- vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
+ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
+ copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
*reg |= SIGP_STAT_INVALID_PARAMETER;
return 1; /* invalid parameter */
}
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
index eb1df632e749..d321329130ec 100644
--- a/arch/s390/lib/qrnnd.S
+++ b/arch/s390/lib/qrnnd.S
@@ -1,5 +1,7 @@
# S/390 __udiv_qrnnd
+#include <linux/linkage.h>
+
# r2 : &__r
# r3 : upper half of 64 bit word n
# r4 : lower half of 64 bit word n
@@ -8,8 +10,7 @@
# the quotient q is to be returned
.text
- .globl __udiv_qrnnd
-__udiv_qrnnd:
+ENTRY(__udiv_qrnnd)
 	st	%r2,24(%r15)	# store pointer to remainder for later
lr %r0,%r3 # reload n
lr %r1,%r4
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 095f782a5512..9564fc779b27 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -303,9 +303,24 @@ static inline int do_exception(struct pt_regs *regs, int access,
flags = FAULT_FLAG_ALLOW_RETRY;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
-retry:
down_read(&mm->mmap_sem);
+#ifdef CONFIG_PGSTE
+ if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
+ address = gmap_fault(address,
+ (struct gmap *) S390_lowcore.gmap);
+ if (address == -EFAULT) {
+ fault = VM_FAULT_BADMAP;
+ goto out_up;
+ }
+ if (address == -ENOMEM) {
+ fault = VM_FAULT_OOM;
+ goto out_up;
+ }
+ }
+#endif
+
+retry:
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
@@ -356,6 +371,7 @@ retry:
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ down_read(&mm->mmap_sem);
goto retry;
}
}
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index a4d856db9154..597bb2d27c3c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -35,7 +35,7 @@ int arch_prepare_hugepage(struct page *page)
if (MACHINE_HAS_HPAGE)
return 0;
- ptep = (pte_t *) pte_alloc_one(&init_mm, address);
+ ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
if (!ptep)
return -ENOMEM;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37a23c223705..2adb23938a7f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
+#include <linux/slab.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -133,30 +134,374 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
}
#endif
-static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+#ifdef CONFIG_PGSTE
+
+/**
+ * gmap_alloc - allocate a guest address space
+ * @mm: pointer to the parent mm_struct
+ *
+ * Returns a guest address space structure.
+ */
+struct gmap *gmap_alloc(struct mm_struct *mm)
{
- unsigned int old, new;
+ struct gmap *gmap;
+ struct page *page;
+ unsigned long *table;
- do {
- old = atomic_read(v);
- new = old ^ bits;
- } while (atomic_cmpxchg(v, old, new) != old);
- return new;
+ gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
+ if (!gmap)
+ goto out;
+ INIT_LIST_HEAD(&gmap->crst_list);
+ gmap->mm = mm;
+ page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ if (!page)
+ goto out_free;
+ list_add(&page->lru, &gmap->crst_list);
+ table = (unsigned long *) page_to_phys(page);
+ crst_table_init(table, _REGION1_ENTRY_EMPTY);
+ gmap->table = table;
+ list_add(&gmap->list, &mm->context.gmap_list);
+ return gmap;
+
+out_free:
+ kfree(gmap);
+out:
+ return NULL;
}
+EXPORT_SYMBOL_GPL(gmap_alloc);
-/*
- * page table entry allocation/free routines.
+static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
+{
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct page *page;
+
+ if (*table & _SEGMENT_ENTRY_INV)
+ return 0;
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry(rmap, &mp->mapper, list) {
+ if (rmap->entry != table)
+ continue;
+ list_del(&rmap->list);
+ kfree(rmap);
+ break;
+ }
+ *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ return 1;
+}
+
+static void gmap_flush_tlb(struct gmap *gmap)
+{
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_idte((unsigned long) gmap->table |
+ _ASCE_TYPE_REGION1);
+ else
+ __tlb_flush_global();
+}
+
+/**
+ * gmap_free - free a guest address space
+ * @gmap: pointer to the guest address space structure
*/
-#ifdef CONFIG_PGSTE
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
+void gmap_free(struct gmap *gmap)
+{
+ struct page *page, *next;
+ unsigned long *table;
+ int i;
+
+
+ /* Flush tlb. */
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_idte((unsigned long) gmap->table |
+ _ASCE_TYPE_REGION1);
+ else
+ __tlb_flush_global();
+
+ /* Free all segment & region tables. */
+ down_read(&gmap->mm->mmap_sem);
+ list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+ table = (unsigned long *) page_to_phys(page);
+ if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
+ /* Remove gmap rmap structures for segment table. */
+ for (i = 0; i < PTRS_PER_PMD; i++, table++)
+ gmap_unlink_segment(gmap, table);
+ __free_pages(page, ALLOC_ORDER);
+ }
+ up_read(&gmap->mm->mmap_sem);
+ list_del(&gmap->list);
+ kfree(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_enable - switch primary space to the guest address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_enable(struct gmap *gmap)
+{
+ /* Load primary space page table origin. */
+ S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | __pa(gmap->table);
+ asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
+ S390_lowcore.gmap = (unsigned long) gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_enable);
+
+/**
+ * gmap_disable - switch back to the standard primary address space
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_disable(struct gmap *gmap)
+{
+ /* Load primary space page table origin. */
+ S390_lowcore.user_asce =
+ gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
+ asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
+ S390_lowcore.gmap = 0UL;
+}
+EXPORT_SYMBOL_GPL(gmap_disable);
+
+static int gmap_alloc_table(struct gmap *gmap,
+ unsigned long *table, unsigned long init)
+{
+ struct page *page;
+ unsigned long *new;
+
+ page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+ if (!page)
+ return -ENOMEM;
+ new = (unsigned long *) page_to_phys(page);
+ crst_table_init(new, init);
+ down_read(&gmap->mm->mmap_sem);
+ if (*table & _REGION_ENTRY_INV) {
+ list_add(&page->lru, &gmap->crst_list);
+ *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
+ (*table & _REGION_ENTRY_TYPE_MASK);
+ } else
+ __free_pages(page, ALLOC_ORDER);
+ up_read(&gmap->mm->mmap_sem);
+ return 0;
+}
+
+/**
+ * gmap_unmap_segment - unmap segment from the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @to: address in the guest address space
+ * @len: length of the memory area to unmap
+ *
+ * Returns 0 if the unmap succeeded, -EINVAL if not.
+ */
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
+{
+ unsigned long *table;
+ unsigned long off;
+ int flush;
+
+ if ((to | len) & (PMD_SIZE - 1))
+ return -EINVAL;
+ if (len == 0 || to + len < to)
+ return -EINVAL;
+
+ flush = 0;
+ down_read(&gmap->mm->mmap_sem);
+ for (off = 0; off < len; off += PMD_SIZE) {
+ /* Walk the guest addr space page table */
+ table = gmap->table + (((to + off) >> 53) & 0x7ff);
+ if (*table & _REGION_ENTRY_INV)
+ return 0;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 42) & 0x7ff);
+ if (*table & _REGION_ENTRY_INV)
+ return 0;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 31) & 0x7ff);
+ if (*table & _REGION_ENTRY_INV)
+ return 0;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 20) & 0x7ff);
+
+ /* Clear segment table entry in guest address space. */
+ flush |= gmap_unlink_segment(gmap, table);
+ *table = _SEGMENT_ENTRY_INV;
+ }
+ up_read(&gmap->mm->mmap_sem);
+ if (flush)
+ gmap_flush_tlb(gmap);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gmap_unmap_segment);
+
+/**
+ * gmap_map_segment - map a segment to the guest address space
+ * @gmap: pointer to the guest address space structure
+ * @from: source address in the parent address space
+ * @to: target address in the guest address space
+ *
+ * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
+ */
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+ unsigned long to, unsigned long len)
+{
+ unsigned long *table;
+ unsigned long off;
+ int flush;
+
+ if ((from | to | len) & (PMD_SIZE - 1))
+ return -EINVAL;
+ if (len == 0 || from + len > PGDIR_SIZE ||
+ from + len < from || to + len < to)
+ return -EINVAL;
+
+ flush = 0;
+ down_read(&gmap->mm->mmap_sem);
+ for (off = 0; off < len; off += PMD_SIZE) {
+ /* Walk the gmap address space page table */
+ table = gmap->table + (((to + off) >> 53) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INV) &&
+ gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 42) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INV) &&
+ gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 31) & 0x7ff);
+ if ((*table & _REGION_ENTRY_INV) &&
+ gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
+ goto out_unmap;
+ table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
+ table = table + (((to + off) >> 20) & 0x7ff);
+
+ /* Store 'from' address in an invalid segment table entry. */
+ flush |= gmap_unlink_segment(gmap, table);
+ *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
+ }
+ up_read(&gmap->mm->mmap_sem);
+ if (flush)
+ gmap_flush_tlb(gmap);
+ return 0;
+
+out_unmap:
+ up_read(&gmap->mm->mmap_sem);
+ gmap_unmap_segment(gmap, to, len);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(gmap_map_segment);
+
+unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
+{
+ unsigned long *table, vmaddr, segment;
+ struct mm_struct *mm;
+ struct gmap_pgtable *mp;
+ struct gmap_rmap *rmap;
+ struct vm_area_struct *vma;
+ struct page *page;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ current->thread.gmap_addr = address;
+ mm = gmap->mm;
+ /* Walk the gmap address space page table */
+ table = gmap->table + ((address >> 53) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -EFAULT;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 42) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -EFAULT;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 31) & 0x7ff);
+ if (unlikely(*table & _REGION_ENTRY_INV))
+ return -EFAULT;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ table = table + ((address >> 20) & 0x7ff);
+
+ /* Convert the gmap address to an mm address. */
+ segment = *table;
+ if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+ page = pfn_to_page(segment >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ return mp->vmaddr | (address & ~PMD_MASK);
+ } else if (segment & _SEGMENT_ENTRY_RO) {
+ vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+ vma = find_vma(mm, vmaddr);
+ if (!vma || vma->vm_start > vmaddr)
+ return -EFAULT;
+
+ /* Walk the parent mm page table */
+ pgd = pgd_offset(mm, vmaddr);
+ pud = pud_alloc(mm, pgd, vmaddr);
+ if (!pud)
+ return -ENOMEM;
+ pmd = pmd_alloc(mm, pud, vmaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (!pmd_present(*pmd) &&
+ __pte_alloc(mm, vma, pmd, vmaddr))
+ return -ENOMEM;
+ /* pmd now points to a valid segment table entry. */
+ rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+ if (!rmap)
+ return -ENOMEM;
+ /* Link gmap segment table entry location to page table. */
+ page = pmd_page(*pmd);
+ mp = (struct gmap_pgtable *) page->index;
+ rmap->entry = table;
+ list_add(&rmap->list, &mp->mapper);
+ /* Set gmap segment table entry to page table. */
+ *table = pmd_val(*pmd) & PAGE_MASK;
+ return vmaddr | (address & ~PMD_MASK);
+ }
+ return -EFAULT;
+
+}
+EXPORT_SYMBOL_GPL(gmap_fault);
+
+void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
+{
+ struct gmap_rmap *rmap, *next;
+ struct gmap_pgtable *mp;
+ struct page *page;
+ int flush;
+
+ flush = 0;
+ spin_lock(&mm->page_table_lock);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+ *rmap->entry =
+ _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ list_del(&rmap->list);
+ kfree(rmap);
+ flush = 1;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (flush)
+ __tlb_flush_global();
+}
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+ unsigned long vmaddr)
{
struct page *page;
unsigned long *table;
+ struct gmap_pgtable *mp;
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
+ if (!mp) {
+ __free_page(page);
+ return NULL;
+ }
pgtable_page_ctor(page);
+ mp->vmaddr = vmaddr & PMD_MASK;
+ INIT_LIST_HEAD(&mp->mapper);
+ page->index = (unsigned long) mp;
atomic_set(&page->_mapcount, 3);
table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
@@ -167,24 +512,57 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
static inline void page_table_free_pgste(unsigned long *table)
{
struct page *page;
+ struct gmap_pgtable *mp;
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ mp = (struct gmap_pgtable *) page->index;
+ BUG_ON(!list_empty(&mp->mapper));
pgtable_page_ctor(page);
atomic_set(&page->_mapcount, -1);
+ kfree(mp);
__free_page(page);
}
-#endif
-unsigned long *page_table_alloc(struct mm_struct *mm)
+#else /* CONFIG_PGSTE */
+
+static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
+ unsigned long vmaddr)
+{
+	return NULL;
+}
+
+static inline void page_table_free_pgste(unsigned long *table)
+{
+}
+
+static inline void gmap_unmap_notifier(struct mm_struct *mm,
+ unsigned long *table)
+{
+}
+
+#endif /* CONFIG_PGSTE */
+
+static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
+{
+ unsigned int old, new;
+
+ do {
+ old = atomic_read(v);
+ new = old ^ bits;
+ } while (atomic_cmpxchg(v, old, new) != old);
+ return new;
+}
+
+/*
+ * page table entry allocation/free routines.
+ */
+unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
struct page *page;
unsigned long *table;
unsigned int mask, bit;
-#ifdef CONFIG_PGSTE
if (mm_has_pgste(mm))
- return page_table_alloc_pgste(mm);
-#endif
+ return page_table_alloc_pgste(mm, vmaddr);
/* Allocate fragments of a 4K page as 1K/2K page table */
spin_lock_bh(&mm->context.list_lock);
mask = FRAG_MASK;
@@ -222,10 +600,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned int bit, mask;
-#ifdef CONFIG_PGSTE
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm)) {
+ gmap_unmap_notifier(mm, table);
return page_table_free_pgste(table);
-#endif
+ }
/* Free 1K/2K page table fragment of a 4K page */
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
@@ -249,10 +627,8 @@ static void __page_table_free_rcu(void *table, unsigned bit)
{
struct page *page;
-#ifdef CONFIG_PGSTE
if (bit == FRAG_MASK)
return page_table_free_pgste(table);
-#endif
/* Free 1K/2K page table fragment of a 4K page */
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
@@ -269,13 +645,12 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
unsigned int bit, mask;
mm = tlb->mm;
-#ifdef CONFIG_PGSTE
if (mm_has_pgste(mm)) {
+ gmap_unmap_notifier(mm, table);
table = (unsigned long *) (__pa(table) | FRAG_MASK);
tlb_remove_table(tlb, table);
return;
}
-#endif
bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock_bh(&mm->context.list_lock);
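To make the new gmap interface above concrete, here is a hedged sketch of how a hypervisor-side caller might drive it; the exported functions are the ones added in this hunk, but the addresses, sizes and error handling are illustrative only:

    /* Illustrative sequence only -- addresses and sizes are arbitrary. */
    struct gmap *g = gmap_alloc(current->mm);
    if (!g)
            return -ENOMEM;
    /* Back guest absolute 0..256MB with the parent mapping at 1GB. */
    if (gmap_map_segment(g, 0x40000000UL, 0x0UL, 0x10000000UL)) {
            gmap_free(g);
            return -ENOMEM;
    }
    gmap_enable(g);         /* run the guest with the gmap as primary ASCE */
    /* ... on a fault, gmap_fault() resolves a guest address to a
     *     parent-mm address, or returns -EFAULT/-ENOMEM ... */
    gmap_disable(g);
    gmap_unmap_segment(g, 0x0UL, 0x10000000UL);
    gmap_free(g);
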
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8c1970d1dd91..781ff5169560 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -61,12 +61,12 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
-static pte_t __ref *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
pte_t *pte;
if (slab_is_available())
- pte = (pte_t *) page_table_alloc(&init_mm);
+ pte = (pte_t *) page_table_alloc(&init_mm, address);
else
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
@@ -120,7 +120,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
}
#endif
if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc();
+ pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
- pt_dir = vmem_pte_alloc();
+ pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
pmd_populate(&init_mm, pm_dir, pt_dir);
diff --git a/arch/score/kernel/module.c b/arch/score/kernel/module.c
index 4de8d47becd3..469e3b64e2f2 100644
--- a/arch/score/kernel/module.c
+++ b/arch/score/kernel/module.c
@@ -27,23 +27,6 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
-void *module_alloc(unsigned long size)
-{
- return size ? vmalloc(size) : NULL;
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *mod)
-{
- return 0;
-}
-
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relindex,
struct module *me)
@@ -146,6 +129,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
+	/* Non-standard return value... most other arches return -ENOEXEC
+ * for an unsupported relocation variant
+ */
return 0;
}
@@ -154,12 +140,3 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
{
return NULL;
}
-
-/* Put in dbe list if necessary. */
-int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod) {}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index bbdeb48bbf8e..748ff1920068 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -897,20 +897,4 @@ source "security/Kconfig"
source "crypto/Kconfig"
-menuconfig VIRTUALIZATION
- bool "Virtualization"
- default n
- ---help---
- Say Y here to get to see options for using your Linux host to run other
- operating systems inside virtual machines (guests).
- This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if VIRTUALIZATION
-
-source drivers/virtio/Kconfig
-
-endif # VIRTUALIZATION
-
source "lib/Kconfig"
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
index 4b16bf9b56bd..9670e127b7b2 100644
--- a/arch/sh/include/asm/delay.h
+++ b/arch/sh/include/asm/delay.h
@@ -1,26 +1 @@
-#ifndef __ASM_SH_DELAY_H
-#define __ASM_SH_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/sh/lib/delay.c
- */
-
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
- __udelay(n))
-
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
-
-#endif /* __ASM_SH_DELAY_H */
+#include <asm-generic/delay.h>
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index 19b1f8826aef..1b525dedd29a 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -34,30 +34,6 @@
#include <asm/unaligned.h>
#include <asm/dwarf.h>
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
-
- return vmalloc_exec(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -133,17 +109,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 99ba5baa9497..da0c6c70ccb2 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -68,12 +68,6 @@ void *module_alloc(unsigned long size)
return ret;
}
-/* Free memory returned from module_core_alloc/module_init_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -107,17 +101,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -239,15 +222,4 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-#else
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- return 0;
-}
#endif /* CONFIG_SPARC64 */
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index f68df69f1f67..28fa6ece9d3a 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -98,25 +98,6 @@ void module_free(struct module *mod, void *module_region)
*/
}
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- pr_err("module %s: .rel relocation unsupported\n", me->name);
- return -ENOEXEC;
-}
-
#ifdef __tilegx__
/*
* Validate that the high 16 bits of "value" is just the sign-extension of
@@ -249,15 +230,3 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
}
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- /* FIXME: perhaps remove the "writable" bit from the TLB? */
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/tile/kvm/Kconfig b/arch/tile/kvm/Kconfig
index b88f9c047781..669fcdba31ea 100644
--- a/arch/tile/kvm/Kconfig
+++ b/arch/tile/kvm/Kconfig
@@ -33,6 +33,5 @@ config KVM
If unsure, say N.
source drivers/vhost/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index 15587ed9a361..87b659dadf3f 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -8,7 +8,8 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
obj-$(CONFIG_BINFMT_ELF) += elfcore.o
-subarch-obj-y = lib/rwsem.o lib/string_32.o
+subarch-obj-y = lib/string_32.o
+subarch-obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += lib/rwsem.o
subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
index 3e5a38d71a1e..8fbe8577f5e6 100644
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -37,19 +37,6 @@ void *module_alloc(unsigned long size)
return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
}
-void module_free(struct module *module, void *region)
-{
- vfree(region);
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned int relindex, struct module *module)
@@ -128,25 +115,3 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
}
return 0;
}
-
-int
-apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *module)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
-int
-module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *module)
-{
- return 0;
-}
-
-void
-module_arch_cleanup(struct module *mod)
-{
-}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5f60ea190d5b..a67e014e4e44 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -390,12 +390,21 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
+config X86_INTEL_MID
+ bool "Intel MID platform support"
+ depends on X86_32
+ depends on X86_EXTENDED_PLATFORM
+ ---help---
+ Select to build a kernel capable of supporting Intel MID platform
+ systems which do not have the PCI legacy interfaces (Moorestown,
+ Medfield). If you are building for a PC class system say N here.
+
+if X86_INTEL_MID
+
config X86_MRST
bool "Moorestown MID platform"
depends on PCI
depends on PCI_GOANY
- depends on X86_32
- depends on X86_EXTENDED_PLATFORM
depends on X86_IO_APIC
select APB_TIMER
select I2C
@@ -410,6 +419,8 @@ config X86_MRST
nor standard legacy replacement devices/features. e.g. Moorestown does
not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+endif
+
config X86_RDC321X
bool "RDC R-321x SoC"
depends on X86_32
@@ -518,6 +529,18 @@ menuconfig PARAVIRT_GUEST
if PARAVIRT_GUEST
+config PARAVIRT_TIME_ACCOUNTING
+ bool "Paravirtual steal time accounting"
+ select PARAVIRT
+ default n
+ ---help---
+ Select this option to enable fine granularity task steal time
+ accounting. Time spent executing other tasks in parallel with
+ the current vCPU is discounted from the vCPU power. To account for
+ that, there can be a small performance impact.
+
+ If in doubt, say N here.
+
source "arch/x86/xen/Kconfig"
config KVM_CLOCK
@@ -623,6 +646,7 @@ config HPET_EMULATE_RTC
config APB_TIMER
def_bool y if MRST
prompt "Langwell APB Timer Support" if X86_MRST
+ select DW_APB_TIMER
help
APB timer is the replacement for 8254, HPET on X86 MID platforms.
The APBT provides a stable time base on SMP
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 6a7cfdf8ff69..e3ca7e0d858c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -312,6 +312,9 @@ config X86_CMPXCHG
config CMPXCHG_LOCAL
def_bool X86_64 || (X86_32 && !M386)
+config CMPXCHG_DOUBLE
+ def_bool y
+
config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || MPSC
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index f7cb086b4add..95365a82b6a0 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,12 +9,6 @@
# Changed by many, many contributors over the years.
#
-# ROOT_DEV specifies the default root-device when making the image.
-# This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
-# the default of FLOPPY is used by 'build'.
-
-ROOT_DEV := CURRENT
-
# If you want to preset the SVGA mode, uncomment the next line and
# set SVGA_MODE to whatever number you want.
# Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
@@ -75,8 +69,7 @@ GCOV_PROFILE := n
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
quiet_cmd_image = BUILD $@
-cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
- $(ROOT_DEV) > $@
+cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(call if_changed,image)
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index ee3a4ea923ac..fdc60a0b3c20 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -130,7 +130,7 @@ static void die(const char * str, ...)
static void usage(void)
{
- die("Usage: build setup system [rootdev] [> image]");
+ die("Usage: build setup system [> image]");
}
int main(int argc, char ** argv)
@@ -138,39 +138,14 @@ int main(int argc, char ** argv)
unsigned int i, sz, setup_sectors;
int c;
u32 sys_size;
- u8 major_root, minor_root;
struct stat sb;
FILE *file;
int fd;
void *kernel;
u32 crc = 0xffffffffUL;
- if ((argc < 3) || (argc > 4))
+ if (argc != 3)
usage();
- if (argc > 3) {
- if (!strcmp(argv[3], "CURRENT")) {
- if (stat("/", &sb)) {
- perror("/");
- die("Couldn't stat /");
- }
- major_root = major(sb.st_dev);
- minor_root = minor(sb.st_dev);
- } else if (strcmp(argv[3], "FLOPPY")) {
- if (stat(argv[3], &sb)) {
- perror(argv[3]);
- die("Couldn't stat root device.");
- }
- major_root = major(sb.st_rdev);
- minor_root = minor(sb.st_rdev);
- } else {
- major_root = 0;
- minor_root = 0;
- }
- } else {
- major_root = DEFAULT_MAJOR_ROOT;
- minor_root = DEFAULT_MINOR_ROOT;
- }
- fprintf(stderr, "Root device is (%d, %d)\n", major_root, minor_root);
/* Copy the setup code */
file = fopen(argv[1], "r");
@@ -193,8 +168,8 @@ int main(int argc, char ** argv)
memset(buf+c, 0, i-c);
/* Set the default root device */
- buf[508] = minor_root;
- buf[509] = major_root;
+ buf[508] = DEFAULT_MINOR_ROOT;
+ buf[509] = DEFAULT_MAJOR_ROOT;
fprintf(stderr, "Setup is %d bytes (padded to %d bytes).\n", c, i);
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 7a6e68e4f748..976aa64d9a20 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -245,7 +245,7 @@ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
& CRYPTO_TFM_RES_MASK);
- return 0;
+ return err;
}
static int ghash_async_init_tfm(struct crypto_tfm *tfm)
diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h
index 082cf8184935..0acbac299e49 100644
--- a/arch/x86/include/asm/apb_timer.h
+++ b/arch/x86/include/asm/apb_timer.h
@@ -18,24 +18,6 @@
#ifdef CONFIG_APB_TIMER
-/* Langwell DW APB timer registers */
-#define APBTMR_N_LOAD_COUNT 0x00
-#define APBTMR_N_CURRENT_VALUE 0x04
-#define APBTMR_N_CONTROL 0x08
-#define APBTMR_N_EOI 0x0c
-#define APBTMR_N_INT_STATUS 0x10
-
-#define APBTMRS_INT_STATUS 0xa0
-#define APBTMRS_EOI 0xa4
-#define APBTMRS_RAW_INT_STATUS 0xa8
-#define APBTMRS_COMP_VERSION 0xac
-#define APBTMRS_REG_SIZE 0x14
-
-/* register bits */
-#define APBTMR_CONTROL_ENABLE (1<<0)
-#define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */
-#define APBTMR_CONTROL_INT (1<<2)
-
/* default memory mapped register base */
#define LNW_SCU_ADDR 0xFF100000
#define LNW_EXT_TIMER_OFFSET 0x1B800
@@ -43,8 +25,8 @@
#define LNW_EXT_TIMER_PGOFFSET 0x800
/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
-#define APBT_MAX_FREQ 50
-#define APBT_MIN_FREQ 1
+#define APBT_MAX_FREQ 50000000
+#define APBT_MIN_FREQ 1000000
#define APBT_MMAP_SIZE 1024
#define APBT_DEV_USED 1
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 284a6e8f7ce1..3deb7250624c 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -280,4 +280,52 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
#endif
+#define cmpxchg8b(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __dummy; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1" \
+ : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\
+ : "a" (__old1), "d"(__old2), \
+ "b" (__new1), "c" (__new2) \
+ : "memory"); \
+ __ret; })
+
+
+#define cmpxchg8b_local(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __dummy; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile("cmpxchg8b %2; setz %1" \
+ : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\
+ : "a" (__old), "d"(__old2), \
+ "b" (__new1), "c" (__new2), \
+ : "memory"); \
+ __ret; })
+
+
+#define cmpxchg_double(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ VM_BUG_ON((unsigned long)(ptr) % 8); \
+ cmpxchg8b((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ VM_BUG_ON((unsigned long)(ptr) % 8); \
+ cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define system_has_cmpxchg_double() cpu_has_cx8
+
#endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 423ae58aa020..7cf5c0a24434 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -151,4 +151,49 @@ extern void __cmpxchg_wrong_size(void);
cmpxchg_local((ptr), (o), (n)); \
})
+#define cmpxchg16b(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __junk; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile(LOCK_PREFIX "cmpxchg16b %2;setz %1" \
+ : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
+ : "b"(__new1), "c"(__new2), \
+ "a"(__old1), "d"(__old2)); \
+ __ret; })
+
+
+#define cmpxchg16b_local(ptr, o1, o2, n1, n2) \
+({ \
+ char __ret; \
+ __typeof__(o2) __junk; \
+ __typeof__(*(ptr)) __old1 = (o1); \
+ __typeof__(o2) __old2 = (o2); \
+ __typeof__(*(ptr)) __new1 = (n1); \
+ __typeof__(o2) __new2 = (n2); \
+ asm volatile("cmpxchg16b %2;setz %1" \
+ : "=d"(__junk), "=a"(__ret), "+m" (*ptr) \
+ : "b"(__new1), "c"(__new2), \
+ "a"(__old1), "d"(__old2)); \
+ __ret; })
+
+#define cmpxchg_double(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ VM_BUG_ON((unsigned long)(ptr) % 16); \
+ cmpxchg16b((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define cmpxchg_double_local(ptr, o1, o2, n1, n2) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ VM_BUG_ON((unsigned long)(ptr) % 16); \
+ cmpxchg16b_local((ptr), (o1), (o2), (n1), (n2)); \
+})
+
+#define system_has_cmpxchg_double() cpu_has_cx16
+
#endif /* _ASM_X86_CMPXCHG_64_H */
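For orientation, cmpxchg_double() atomically compares and swaps two adjacent machine words (for example a pointer plus a generation counter packed next to it), provided the pair is naturally aligned to twice the word size so the checks above pass. A hedged usage sketch follows; the struct and helper names are illustrative, not from this patch:

    struct tagged_ptr {
            void *ptr;
            unsigned long tag;
    } __attribute__((aligned(2 * sizeof(void *))));

    static bool update_tagged(struct tagged_ptr *tp, void *old_p,
                              unsigned long old_t, void *new_p)
    {
            /* Both words are compared and, on a match, replaced in one
             * atomic step; bumping the tag defends against ABA. */
            return cmpxchg_double(&tp->ptr, old_p, old_t, new_p, old_t + 1);
    }
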
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 9929b35929ff..4258aac99a6e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -288,6 +288,8 @@ extern const char * const x86_power_flags[32];
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
+#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h
index 409a649204aa..9b3b4f2754c7 100644
--- a/arch/x86/include/asm/delay.h
+++ b/arch/x86/include/asm/delay.h
@@ -1,30 +1,7 @@
#ifndef _ASM_X86_DELAY_H
#define _ASM_X86_DELAY_H
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/x86/lib/delay.c
- */
-
-/* Undefined functions to get compile-time errors */
-extern void __bad_udelay(void);
-extern void __bad_ndelay(void);
-
-extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long xloops);
-extern void __delay(unsigned long loops);
-
-/* 0x10c7 is 2**32 / 1000000 (rounded up) */
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
- __udelay(n))
-
-/* 0x5 is 2**32 / 1000000000 (rounded up) */
-#define ndelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
- __ndelay(n))
+#include <asm-generic/delay.h>
void use_tsc_delay(void);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0049211959c0..6040d115ef51 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -229,7 +229,26 @@ struct read_cache {
unsigned long end;
};
-struct decode_cache {
+struct x86_emulate_ctxt {
+ struct x86_emulate_ops *ops;
+
+ /* Register state before/after emulation. */
+ unsigned long eflags;
+ unsigned long eip; /* eip before instruction emulation */
+ /* Emulated execution mode, represented by an X86EMUL_MODE value. */
+ int mode;
+
+ /* interruptibility state, as a result of execution of STI or MOV SS */
+ int interruptibility;
+
+ bool guest_mode; /* guest running a nested guest */
+ bool perm_ok; /* do not check permissions if true */
+ bool only_vendor_specific_insn;
+
+ bool have_exception;
+ struct x86_exception exception;
+
+ /* decode cache */
u8 twobyte;
u8 b;
u8 intercept;
@@ -246,8 +265,6 @@ struct decode_cache {
unsigned int d;
int (*execute)(struct x86_emulate_ctxt *ctxt);
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
- unsigned long regs[NR_VCPU_REGS];
- unsigned long eip;
/* modrm */
u8 modrm;
u8 modrm_mod;
@@ -255,34 +272,14 @@ struct decode_cache {
u8 modrm_rm;
u8 modrm_seg;
bool rip_relative;
+ unsigned long _eip;
+ /* Fields above regs are cleared together. */
+ unsigned long regs[NR_VCPU_REGS];
struct fetch_cache fetch;
struct read_cache io_read;
struct read_cache mem_read;
};
-struct x86_emulate_ctxt {
- struct x86_emulate_ops *ops;
-
- /* Register state before/after emulation. */
- unsigned long eflags;
- unsigned long eip; /* eip before instruction emulation */
- /* Emulated execution mode, represented by an X86EMUL_MODE value. */
- int mode;
-
- /* interruptibility state, as a result of execution of STI or MOV SS */
- int interruptibility;
-
- bool guest_mode; /* guest running a nested guest */
- bool perm_ok; /* do not check permissions if true */
- bool only_vendor_specific_insn;
-
- bool have_exception;
- struct x86_exception exception;
-
- /* decode cache */
- struct decode_cache decode;
-};
-
/* Repeat String Operation Prefix */
#define REPE_PREFIX 0xf3
#define REPNE_PREFIX 0xf2
@@ -373,6 +370,5 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int reason,
bool has_error_code, u32 error_code);
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int irq);
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
#endif /* _ASM_X86_KVM_X86_EMULATE_H */
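The comment "Fields above regs are cleared together" is the point of the reordering: by placing every per-instruction decode field before regs, the emulator can reset its decode state with a single memset and leave the register file (and everything declared after it) intact. Roughly, and purely as an illustration of the layout trick rather than the exact companion code:

    /* Illustrative: reset only the per-instruction decode state. */
    static void reset_decode_cache(struct x86_emulate_ctxt *ctxt)
    {
            /* Wipe everything from 'twobyte' up to (but not including)
             * 'regs' in one shot; 'regs' and later fields survive. */
            memset(&ctxt->twobyte, 0,
                   (void *)&ctxt->regs - (void *)&ctxt->twobyte);
    }
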
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d2ac8e2ee897..dd51c83aa5de 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -48,7 +48,7 @@
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXSAVE \
+ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -205,6 +205,7 @@ union kvm_mmu_page_role {
unsigned invalid:1;
unsigned nxe:1;
unsigned cr0_wp:1;
+ unsigned smep_andnot_wp:1;
};
};
@@ -227,15 +228,17 @@ struct kvm_mmu_page {
* in this shadow page.
*/
DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
- bool multimapped; /* More than one parent_pte? */
bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
- union {
- u64 *parent_pte; /* !multimapped */
- struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
- };
+ unsigned long parent_ptes; /* Reverse mapping for parent_pte */
DECLARE_BITMAP(unsync_child_bitmap, 512);
+
+#ifdef CONFIG_X86_32
+ int clear_spte_count;
+#endif
+
+ struct rcu_head rcu;
};
struct kvm_pv_mmu_op_buffer {
@@ -269,8 +272,6 @@ struct kvm_mmu {
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
struct x86_exception *exception);
gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
- void (*prefetch_page)(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *page);
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
@@ -346,8 +347,7 @@ struct kvm_vcpu_arch {
* put it here to avoid allocation */
struct kvm_pv_mmu_op_buffer mmu_op_buffer;
- struct kvm_mmu_memory_cache mmu_pte_chain_cache;
- struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+ struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
struct kvm_mmu_memory_cache mmu_page_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;
@@ -393,6 +393,15 @@ struct kvm_vcpu_arch {
unsigned int hw_tsc_khz;
unsigned int time_offset;
struct page *time_page;
+
+ struct {
+ u64 msr_val;
+ u64 last_steal;
+ u64 accum_steal;
+ struct gfn_to_hva_cache stime;
+ struct kvm_steal_time steal;
+ } st;
+
u64 last_guest_tsc;
u64 last_kernel_ns;
u64 last_tsc_nsec;
@@ -419,6 +428,11 @@ struct kvm_vcpu_arch {
u64 mcg_ctl;
u64 *mce_banks;
+ /* Cache MMIO info */
+ u64 mmio_gva;
+ unsigned access;
+ gfn_t mmio_gfn;
+
/* used for guest single stepping over the given code position */
unsigned long singlestep_rip;
@@ -441,6 +455,7 @@ struct kvm_arch {
unsigned int n_used_mmu_pages;
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
+ unsigned int indirect_shadow_pages;
atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
@@ -477,6 +492,8 @@ struct kvm_arch {
u64 hv_guest_os_id;
u64 hv_hypercall;
+ atomic_t reader_counter;
+
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
#endif
@@ -559,7 +576,7 @@ struct kvm_x86_ops {
void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
- void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+ int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
@@ -636,7 +653,6 @@ void kvm_mmu_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
-void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask);
@@ -830,11 +846,12 @@ enum {
asmlinkage void kvm_spurious_fault(void);
extern bool kvm_rebooting;
-#define __kvm_handle_fault_on_reboot(insn) \
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
"666: " insn "\n\t" \
"668: \n\t" \
".pushsection .fixup, \"ax\" \n" \
"667: \n\t" \
+ cleanup_insn "\n\t" \
"cmpb $0, kvm_rebooting \n\t" \
"jne 668b \n\t" \
__ASM_SIZE(push) " $666b \n\t" \
@@ -844,6 +861,9 @@ extern bool kvm_rebooting;
_ASM_PTR " 666b, 667b \n\t" \
".popsection"
+#define __kvm_handle_fault_on_reboot(insn) \
+ ____kvm_handle_fault_on_reboot(insn, "")
+
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index a427bf77a93d..734c3767cfac 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -21,6 +21,7 @@
*/
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4
+#define KVM_FEATURE_STEAL_TIME 5
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
@@ -30,10 +31,23 @@
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
+#define KVM_MSR_ENABLED 1
/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
+#define MSR_KVM_STEAL_TIME 0x4b564d03
+
+struct kvm_steal_time {
+ __u64 steal;
+ __u32 version;
+ __u32 flags;
+ __u32 pad[12];
+};
+
+#define KVM_STEAL_ALIGNMENT_BITS 5
+#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
+#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
#define KVM_MAX_MMU_OP_BATCH 32
@@ -178,6 +192,7 @@ void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
+extern void kvm_disable_steal_time(void);
#else
#define kvm_guest_init() do { } while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
@@ -186,6 +201,11 @@ static inline u32 kvm_read_and_reset_pf_reason(void)
{
return 0;
}
+
+static inline void kvm_disable_steal_time(void)
+{
+ return;
+}
#endif
#endif /* __KERNEL__ */
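The MSR and struct added above define the enablement protocol: the guest publishes the physical address of a per-cpu kvm_steal_time area (64-byte aligned, matching KVM_STEAL_ALIGNMENT_BITS) through MSR_KVM_STEAL_TIME, with bit 0 serving as KVM_MSR_ENABLED. A hedged sketch of the guest side, with illustrative names (the real registration lives in the companion arch/x86/kernel/kvm.c change):

    static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);

    static void enable_steal_time(void)
    {
            u64 pa;

            if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
                    return;
            /* Hand the host the physical address of this CPU's area
             * and set the enable bit. */
            pa = __pa(&__get_cpu_var(steal_time)) | KVM_MSR_ENABLED;
            wrmsrl(MSR_KVM_STEAL_TIME, pa);
    }
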
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index d96bdb25ca3d..d52609aeeab8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -441,6 +441,18 @@
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
+#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
+#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
+#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
+#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
+
+/* VMX_BASIC bits and bitmasks */
+#define VMX_BASIC_VMCS_SIZE_SHIFT 32
+#define VMX_BASIC_64 0x0001000000000000LLU
+#define VMX_BASIC_MEM_TYPE_SHIFT 50
+#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
+#define VMX_BASIC_MEM_TYPE_WB 6LLU
+#define VMX_BASIC_INOUT 0x0040000000000000LLU
/* AMD-V MSRs */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ebbc4d8ab170..a7d2db9a74fb 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -230,6 +230,15 @@ static inline unsigned long long paravirt_sched_clock(void)
return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
+struct jump_label_key;
+extern struct jump_label_key paravirt_steal_enabled;
+extern struct jump_label_key paravirt_steal_rq_enabled;
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
+}
+
static inline unsigned long long paravirt_read_pmc(int counter)
{
return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 82885099c869..2c7652163111 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -89,6 +89,7 @@ struct pv_lazy_ops {
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
+ unsigned long long (*steal_clock)(int cpu);
unsigned long (*get_tsc_khz)(void);
};
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 59ab4dffa377..2dddb317bb39 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -59,6 +59,7 @@
#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
+#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 84471b810460..2caf290e9895 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -132,6 +132,8 @@ enum vmcs_field {
GUEST_IA32_PAT_HIGH = 0x00002805,
GUEST_IA32_EFER = 0x00002806,
GUEST_IA32_EFER_HIGH = 0x00002807,
+ GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
+ GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
GUEST_PDPTR0 = 0x0000280a,
GUEST_PDPTR0_HIGH = 0x0000280b,
GUEST_PDPTR1 = 0x0000280c,
@@ -144,6 +146,8 @@ enum vmcs_field {
HOST_IA32_PAT_HIGH = 0x00002c01,
HOST_IA32_EFER = 0x00002c02,
HOST_IA32_EFER_HIGH = 0x00002c03,
+ HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
+ HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
EXCEPTION_BITMAP = 0x00004004,
@@ -426,4 +430,43 @@ struct vmx_msr_entry {
u64 value;
} __aligned(16);
+/*
+ * Exit Qualifications for entry failure during or after loading guest state
+ */
+#define ENTRY_FAIL_DEFAULT 0
+#define ENTRY_FAIL_PDPTE 2
+#define ENTRY_FAIL_NMI 3
+#define ENTRY_FAIL_VMCS_LINK_PTR 4
+
+/*
+ * VM-instruction error numbers
+ */
+enum vm_instruction_error_number {
+ VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
+ VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
+ VMXERR_VMCLEAR_VMXON_POINTER = 3,
+ VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
+ VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
+ VMXERR_VMRESUME_AFTER_VMXOFF = 6,
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
+ VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
+ VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
+ VMXERR_VMPTRLD_VMXON_POINTER = 10,
+ VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
+ VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
+ VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
+ VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
+ VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
+ VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
+ VMXERR_VMCALL_NONCLEAR_VMCS = 19,
+ VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
+ VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
+ VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
+ VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
+ VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
+};
+
#endif
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index d240ea950519..417777de5a40 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -39,6 +39,8 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <trace/events/xen.h>
+
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -459,6 +461,8 @@ MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
mcl->op = __HYPERVISOR_fpu_taskswitch;
mcl->args[0] = set;
+
+ trace_xen_mc_entry(mcl, 1);
}
static inline void
@@ -475,6 +479,8 @@ MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
mcl->args[2] = new_val.pte >> 32;
mcl->args[3] = flags;
}
+
+ trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}
static inline void
@@ -485,6 +491,8 @@ MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
mcl->args[0] = cmd;
mcl->args[1] = (unsigned long)uop;
mcl->args[2] = count;
+
+ trace_xen_mc_entry(mcl, 3);
}
static inline void
@@ -504,6 +512,8 @@ MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long v
mcl->args[3] = flags;
mcl->args[4] = domid;
}
+
+ trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
}
static inline void
@@ -520,6 +530,8 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
mcl->args[2] = desc.a;
mcl->args[3] = desc.b;
}
+
+ trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}
static inline void
@@ -528,6 +540,8 @@ MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
mcl->op = __HYPERVISOR_memory_op;
mcl->args[0] = cmd;
mcl->args[1] = (unsigned long)arg;
+
+ trace_xen_mc_entry(mcl, 2);
}
static inline void
@@ -539,6 +553,8 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
+
+ trace_xen_mc_entry(mcl, 4);
}
static inline void
@@ -550,6 +566,8 @@ MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
+
+ trace_xen_mc_entry(mcl, 4);
}
static inline void
@@ -558,6 +576,8 @@ MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
mcl->op = __HYPERVISOR_set_gdt;
mcl->args[0] = (unsigned long)frames;
mcl->args[1] = entries;
+
+ trace_xen_mc_entry(mcl, 2);
}
static inline void
@@ -567,6 +587,8 @@ MULTI_stack_switch(struct multicall_entry *mcl,
mcl->op = __HYPERVISOR_stack_switch;
mcl->args[0] = ss;
mcl->args[1] = esp;
+
+ trace_xen_mc_entry(mcl, 2);
}
#endif /* _ASM_X86_XEN_HYPERCALL_H */
diff --git a/arch/x86/include/asm/xen/trace_types.h b/arch/x86/include/asm/xen/trace_types.h
new file mode 100644
index 000000000000..21e1874c0a0b
--- /dev/null
+++ b/arch/x86/include/asm/xen/trace_types.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_XEN_TRACE_TYPES_H
+#define _ASM_XEN_TRACE_TYPES_H
+
+enum xen_mc_flush_reason {
+ XEN_MC_FL_NONE, /* explicit flush */
+ XEN_MC_FL_BATCH, /* out of hypercall space */
+ XEN_MC_FL_ARGS, /* out of argument space */
+ XEN_MC_FL_CALLBACK, /* out of callback space */
+};
+
+enum xen_mc_extend_args {
+ XEN_MC_XE_OK,
+ XEN_MC_XE_BAD_OP,
+ XEN_MC_XE_NO_SPACE
+};
+typedef void (*xen_mc_callback_fn_t)(void *);
+
+#endif /* _ASM_XEN_TRACE_TYPES_H */
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 2b6630d75e17..afdc3f756dea 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -27,15 +27,12 @@
* timer, but by default APB timer has higher rating than local APIC timers.
*/
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
#include <linux/delay.h>
+#include <linux/dw_apb_timer.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/pm.h>
-#include <linux/pci.h>
#include <linux/sfi.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
@@ -46,75 +43,46 @@
#include <asm/mrst.h>
#include <asm/time.h>
-#define APBT_MASK CLOCKSOURCE_MASK(32)
-#define APBT_SHIFT 22
#define APBT_CLOCKEVENT_RATING 110
#define APBT_CLOCKSOURCE_RATING 250
-#define APBT_MIN_DELTA_USEC 200
-#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
#define APBT_CLOCKEVENT0_NUM (0)
-#define APBT_CLOCKEVENT1_NUM (1)
#define APBT_CLOCKSOURCE_NUM (2)
-static unsigned long apbt_address;
+static phys_addr_t apbt_address;
static int apb_timer_block_enabled;
static void __iomem *apbt_virt_address;
-static int phy_cs_timer_id;
/*
* Common DW APB timer info
*/
-static uint64_t apbt_freq;
-
-static void apbt_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt);
-static int apbt_next_event(unsigned long delta,
- struct clock_event_device *evt);
-static cycle_t apbt_read_clocksource(struct clocksource *cs);
-static void apbt_restart_clocksource(struct clocksource *cs);
+static unsigned long apbt_freq;
struct apbt_dev {
- struct clock_event_device evt;
- unsigned int num;
- int cpu;
- unsigned int irq;
- unsigned int tick;
- unsigned int count;
- unsigned int flags;
- char name[10];
+ struct dw_apb_clock_event_device *timer;
+ unsigned int num;
+ int cpu;
+ unsigned int irq;
+ char name[10];
};
-static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
+static struct dw_apb_clocksource *clocksource_apbt;
-#ifdef CONFIG_SMP
-static unsigned int apbt_num_timers_used;
-static struct apbt_dev *apbt_devs;
-#endif
-
-static inline unsigned long apbt_readl_reg(unsigned long a)
+static inline void __iomem *adev_virt_addr(struct apbt_dev *adev)
{
- return readl(apbt_virt_address + a);
+ return apbt_virt_address + adev->num * APBTMRS_REG_SIZE;
}
-static inline void apbt_writel_reg(unsigned long d, unsigned long a)
-{
- writel(d, apbt_virt_address + a);
-}
-
-static inline unsigned long apbt_readl(int n, unsigned long a)
-{
- return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
-}
+static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
-static inline void apbt_writel(int n, unsigned long d, unsigned long a)
-{
- writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE);
-}
+#ifdef CONFIG_SMP
+static unsigned int apbt_num_timers_used;
+#endif
static inline void apbt_set_mapping(void)
{
struct sfi_timer_table_entry *mtmr;
+ int phy_cs_timer_id = 0;
if (apbt_virt_address) {
pr_debug("APBT base already mapped\n");
@@ -126,21 +94,18 @@ static inline void apbt_set_mapping(void)
APBT_CLOCKEVENT0_NUM);
return;
}
- apbt_address = (unsigned long)mtmr->phys_addr;
+ apbt_address = (phys_addr_t)mtmr->phys_addr;
if (!apbt_address) {
printk(KERN_WARNING "No timer base from SFI, use default\n");
apbt_address = APBT_DEFAULT_BASE;
}
apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
- if (apbt_virt_address) {
- pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\
- (void *)apbt_address, (void *)apbt_virt_address);
- } else {
- pr_debug("Failed mapping APBT phy address at %p\n",\
- (void *)apbt_address);
+ if (!apbt_virt_address) {
+ pr_debug("Failed mapping APBT phy address at %lu\n",\
+ (unsigned long)apbt_address);
goto panic_noapbt;
}
- apbt_freq = mtmr->freq_hz / USEC_PER_SEC;
+ apbt_freq = mtmr->freq_hz;
sfi_free_mtmr(mtmr);
/* Now figure out the physical timer id for clocksource device */
@@ -149,9 +114,14 @@ static inline void apbt_set_mapping(void)
goto panic_noapbt;
/* Now figure out the physical timer id */
- phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff)
- / APBTMRS_REG_SIZE;
- pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id);
+ pr_debug("Use timer %d for clocksource\n",
+ (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE);
+ phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) /
+ APBTMRS_REG_SIZE;
+
+ clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING,
+ "apbt0", apbt_virt_address + phy_cs_timer_id *
+ APBTMRS_REG_SIZE, apbt_freq);
return;
panic_noapbt:
@@ -173,82 +143,6 @@ static inline int is_apbt_capable(void)
return apbt_virt_address ? 1 : 0;
}
-static struct clocksource clocksource_apbt = {
- .name = "apbt",
- .rating = APBT_CLOCKSOURCE_RATING,
- .read = apbt_read_clocksource,
- .mask = APBT_MASK,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .resume = apbt_restart_clocksource,
-};
-
-/* boot APB clock event device */
-static struct clock_event_device apbt_clockevent = {
- .name = "apbt0",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = apbt_set_mode,
- .set_next_event = apbt_next_event,
- .shift = APBT_SHIFT,
- .irq = 0,
- .rating = APBT_CLOCKEVENT_RATING,
-};
-
-/*
- * start count down from 0xffff_ffff. this is done by toggling the enable bit
- * then load initial load count to ~0.
- */
-static void apbt_start_counter(int n)
-{
- unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
-
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
- apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT);
- /* enable, mask interrupt */
- ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
- ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
- /* read it once to get cached counter value initialized */
- apbt_read_clocksource(&clocksource_apbt);
-}
-
-static irqreturn_t apbt_interrupt_handler(int irq, void *data)
-{
- struct apbt_dev *dev = (struct apbt_dev *)data;
- struct clock_event_device *aevt = &dev->evt;
-
- if (!aevt->event_handler) {
- printk(KERN_INFO "Spurious APBT timer interrupt on %d\n",
- dev->num);
- return IRQ_NONE;
- }
- aevt->event_handler(aevt);
- return IRQ_HANDLED;
-}
-
-static void apbt_restart_clocksource(struct clocksource *cs)
-{
- apbt_start_counter(phy_cs_timer_id);
-}
-
-static void apbt_enable_int(int n)
-{
- unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
- /* clear pending intr */
- apbt_readl(n, APBTMR_N_EOI);
- ctrl &= ~APBTMR_CONTROL_INT;
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
-}
-
-static void apbt_disable_int(int n)
-{
- unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
-
- ctrl |= APBTMR_CONTROL_INT;
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
-}
-
-
static int __init apbt_clockevent_register(void)
{
struct sfi_timer_table_entry *mtmr;
@@ -261,45 +155,21 @@ static int __init apbt_clockevent_register(void)
return -ENODEV;
}
- /*
- * We need to calculate the scaled math multiplication factor for
- * nanosecond to apbt tick conversion.
- * mult = (nsec/cycle)*2^APBT_SHIFT
- */
- apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz
- , NSEC_PER_SEC, APBT_SHIFT);
-
- /* Calculate the min / max delta */
- apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
- &apbt_clockevent);
- apbt_clockevent.min_delta_ns = clockevent_delta2ns(
- APBT_MIN_DELTA_USEC*apbt_freq,
- &apbt_clockevent);
- /*
- * Start apbt with the boot cpu mask and make it
- * global if not used for per cpu timer.
- */
- apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
adev->num = smp_processor_id();
- memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
+ adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
+ mrst_timer_options == MRST_TIMER_LAPIC_APBT ?
+ APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
+ adev_virt_addr(adev), 0, apbt_freq);
+ /* Firmware does EOI handling for us. */
+ adev->timer->eoi = NULL;
if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
- adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
- global_clock_event = &adev->evt;
+ global_clock_event = &adev->timer->ced;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);
}
- if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
- IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
- apbt_clockevent.name, adev)) {
- printk(KERN_ERR "Failed request IRQ for APBT%d\n",
- apbt_clockevent.irq);
- }
-
- clockevents_register_device(&adev->evt);
- /* Start APBT 0 interrupts */
- apbt_enable_int(APBT_CLOCKEVENT0_NUM);
+ dw_apb_clockevent_register(adev->timer);
sfi_free_mtmr(mtmr);
return 0;
@@ -317,52 +187,34 @@ static void apbt_setup_irq(struct apbt_dev *adev)
irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
/* APB timer irqs are set up as mp_irqs, timer is edge type */
__irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
-
- if (system_state == SYSTEM_BOOTING) {
- if (request_irq(adev->irq, apbt_interrupt_handler,
- IRQF_TIMER | IRQF_DISABLED |
- IRQF_NOBALANCING,
- adev->name, adev)) {
- printk(KERN_ERR "Failed request IRQ for APBT%d\n",
- adev->num);
- }
- } else
- enable_irq(adev->irq);
}
/* Should be called with per cpu */
void apbt_setup_secondary_clock(void)
{
struct apbt_dev *adev;
- struct clock_event_device *aevt;
int cpu;
/* Don't register boot CPU clockevent */
cpu = smp_processor_id();
if (!cpu)
return;
- /*
- * We need to calculate the scaled math multiplication factor for
- * nanosecond to apbt tick conversion.
- * mult = (nsec/cycle)*2^APBT_SHIFT
- */
- printk(KERN_INFO "Init per CPU clockevent %d\n", cpu);
- adev = &per_cpu(cpu_apbt_dev, cpu);
- aevt = &adev->evt;
- memcpy(aevt, &apbt_clockevent, sizeof(*aevt));
- aevt->cpumask = cpumask_of(cpu);
- aevt->name = adev->name;
- aevt->mode = CLOCK_EVT_MODE_UNUSED;
+ adev = &__get_cpu_var(cpu_apbt_dev);
+ if (!adev->timer) {
+ adev->timer = dw_apb_clockevent_init(cpu, adev->name,
+ APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
+ adev->irq, apbt_freq);
+ adev->timer->eoi = NULL;
+ } else {
+ dw_apb_clockevent_resume(adev->timer);
+ }
- printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n",
- cpu, aevt->name, *(u32 *)aevt->cpumask);
+ printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n",
+ cpu, adev->name, adev->cpu);
apbt_setup_irq(adev);
-
- clockevents_register_device(aevt);
-
- apbt_enable_int(cpu);
+ dw_apb_clockevent_register(adev->timer);
return;
}
@@ -385,13 +237,12 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
switch (action & 0xf) {
case CPU_DEAD:
- disable_irq(adev->irq);
- apbt_disable_int(cpu);
+ dw_apb_clockevent_pause(adev->timer);
if (system_state == SYSTEM_RUNNING) {
pr_debug("skipping APBT CPU %lu offline\n", cpu);
} else if (adev) {
pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
- free_irq(adev->irq, adev);
+ dw_apb_clockevent_stop(adev->timer);
}
break;
default:
@@ -416,116 +267,16 @@ void apbt_setup_secondary_clock(void) {}
#endif /* CONFIG_SMP */
-static void apbt_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- unsigned long ctrl;
- uint64_t delta;
- int timer_num;
- struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
-
- BUG_ON(!apbt_virt_address);
-
- timer_num = adev->num;
- pr_debug("%s CPU %d timer %d mode=%d\n",
- __func__, first_cpu(*evt->cpumask), timer_num, mode);
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
- delta >>= apbt_clockevent.shift;
- ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
- ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- /*
- * DW APB p. 46, have to disable timer before load counter,
- * may cause sync problem.
- */
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- udelay(1);
- pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
- apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
- ctrl |= APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- break;
- /* APB timer does not have one-shot mode, use free running mode */
- case CLOCK_EVT_MODE_ONESHOT:
- ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
- /*
- * set free running mode, this mode will let timer reload max
- * timeout which will give time (3min on 25MHz clock) to rearm
- * the next event, therefore emulate the one-shot mode.
- */
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
-
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- /* write again to set free running mode */
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
-
- /*
- * DW APB p. 46, load counter with all 1s before starting free
- * running mode.
- */
- apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
- ctrl &= ~APBTMR_CONTROL_INT;
- ctrl |= APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- break;
-
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- apbt_disable_int(timer_num);
- ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- apbt_enable_int(timer_num);
- break;
- }
-}
-
-static int apbt_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- unsigned long ctrl;
- int timer_num;
-
- struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
-
- timer_num = adev->num;
- /* Disable timer */
- ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- /* write new count */
- apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
- ctrl |= APBTMR_CONTROL_ENABLE;
- apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
- return 0;
-}
-
-static cycle_t apbt_read_clocksource(struct clocksource *cs)
-{
- unsigned long current_count;
-
- current_count = apbt_readl(phy_cs_timer_id, APBTMR_N_CURRENT_VALUE);
- return (cycle_t)~current_count;
-}
-
static int apbt_clocksource_register(void)
{
u64 start, now;
cycle_t t1;
/* Start the counter, use timer 2 as source, timer 0/1 for event */
- apbt_start_counter(phy_cs_timer_id);
+ dw_apb_clocksource_start(clocksource_apbt);
/* Verify whether apbt counter works */
- t1 = apbt_read_clocksource(&clocksource_apbt);
+ t1 = dw_apb_clocksource_read(clocksource_apbt);
rdtscll(start);
/*
@@ -540,10 +291,10 @@ static int apbt_clocksource_register(void)
} while ((now - start) < 200000UL);
/* APBT is the only always on clocksource, it has to work! */
- if (t1 == apbt_read_clocksource(&clocksource_apbt))
+ if (t1 == dw_apb_clocksource_read(clocksource_apbt))
panic("APBT counter not counting. APBT disabled\n");
- clocksource_register_khz(&clocksource_apbt, (u32)apbt_freq*1000);
+ dw_apb_clocksource_register(clocksource_apbt);
return 0;
}
@@ -567,10 +318,7 @@ void __init apbt_time_init(void)
if (apb_timer_block_enabled)
return;
apbt_set_mapping();
- if (apbt_virt_address) {
- pr_debug("Found APBT version 0x%lx\n",\
- apbt_readl_reg(APBTMRS_COMP_VERSION));
- } else
+ if (!apbt_virt_address)
goto out_noapbt;
/*
* Read the frequency and check for a sane value, for ESL model
@@ -578,7 +326,7 @@ void __init apbt_time_init(void)
*/
if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
- pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq);
+ pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq);
goto out_noapbt;
}
if (apbt_clocksource_register()) {
@@ -604,30 +352,20 @@ void __init apbt_time_init(void)
} else {
percpu_timer = 0;
apbt_num_timers_used = 1;
- adev = &per_cpu(cpu_apbt_dev, 0);
- adev->flags &= ~APBT_DEV_USED;
}
pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
/* here we set up per CPU timer data structure */
- apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
- GFP_KERNEL);
- if (!apbt_devs) {
- printk(KERN_ERR "Failed to allocate APB timer devices\n");
- return;
- }
for (i = 0; i < apbt_num_timers_used; i++) {
adev = &per_cpu(cpu_apbt_dev, i);
adev->num = i;
adev->cpu = i;
p_mtmr = sfi_get_mtmr(i);
- if (p_mtmr) {
- adev->tick = p_mtmr->freq_hz;
+ if (p_mtmr)
adev->irq = p_mtmr->irq;
- } else
+ else
printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
- adev->count = 0;
- sprintf(adev->name, "apbt%d", i);
+ snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i);
}
#endif
@@ -639,17 +377,8 @@ out_noapbt:
panic("failed to enable APB timer\n");
}
-static inline void apbt_disable(int n)
-{
- if (is_apbt_capable()) {
- unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
- ctrl &= ~APBTMR_CONTROL_ENABLE;
- apbt_writel(n, ctrl, APBTMR_N_CONTROL);
- }
-}
-
/* called before apb_timer_enable, use early map */
-unsigned long apbt_quick_calibrate()
+unsigned long apbt_quick_calibrate(void)
{
int i, scale;
u64 old, new;
@@ -658,31 +387,31 @@ unsigned long apbt_quick_calibrate()
u32 loop, shift;
apbt_set_mapping();
- apbt_start_counter(phy_cs_timer_id);
+ dw_apb_clocksource_start(clocksource_apbt);
/* check if the timer can count down, otherwise return */
- old = apbt_read_clocksource(&clocksource_apbt);
+ old = dw_apb_clocksource_read(clocksource_apbt);
i = 10000;
while (--i) {
- if (old != apbt_read_clocksource(&clocksource_apbt))
+ if (old != dw_apb_clocksource_read(clocksource_apbt))
break;
}
if (!i)
goto failed;
/* count 16 ms */
- loop = (apbt_freq * 1000) << 4;
+ loop = (apbt_freq / 1000) << 4;
/* restart the timer to ensure it won't get to 0 in the calibration */
- apbt_start_counter(phy_cs_timer_id);
+ dw_apb_clocksource_start(clocksource_apbt);
- old = apbt_read_clocksource(&clocksource_apbt);
+ old = dw_apb_clocksource_read(clocksource_apbt);
old += loop;
t1 = __native_read_tsc();
do {
- new = apbt_read_clocksource(&clocksource_apbt);
+ new = dw_apb_clocksource_read(clocksource_apbt);
} while (new < old);
t2 = __native_read_tsc();
@@ -694,7 +423,7 @@ unsigned long apbt_quick_calibrate()
return 0;
}
scale = (int)div_u64((t2 - t1), loop >> shift);
- khz = (scale * apbt_freq * 1000) >> shift;
+ khz = (scale * (apbt_freq / 1000)) >> shift;
printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
return khz;
failed:
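
The math in apbt_quick_calibrate() reduces to a simple ratio: count the APB ticks that make up 16 ms, measure how far the TSC moves over the same window, and scale the quotient back to kHz. A minimal userspace sketch of that arithmetic, with made-up sample values standing in for the hardware reads:

/*
 * Minimal sketch of the calibration arithmetic above. The sample values
 * (a 25 MHz APB timer, a TSC that advances by 32M cycles in the window)
 * are illustrative; the kernel gets both from the hardware.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long apbt_freq = 25000000UL;	/* assumed APB timer rate, Hz */
	uint64_t t1 = 0, t2 = 32000000;		/* assumed TSC samples around the wait */
	uint32_t loop, shift = 5;
	unsigned long scale, khz;

	/* APB ticks in 16 ms: (ticks per ms) << 4 */
	loop = (apbt_freq / 1000) << 4;

	/* TSC cycles per APB tick, pre-scaled by 2^shift to keep precision */
	scale = (unsigned long)((t2 - t1) / (loop >> shift));

	/* cycles-per-tick times ticks-per-ms, un-scaled: TSC cycles per ms == kHz */
	khz = (scale * (apbt_freq / 1000)) >> shift;

	printf("TSC freq calculated by APB timer is %lu khz\n", khz);
	return 0;
}

With these numbers loop is 400000 ticks (16 ms at 25 MHz) and the result is 2000000 kHz, i.e. a 2 GHz TSC.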
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 9498b8445186..b24be38c8cf8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1944,10 +1944,28 @@ void disconnect_bsp_APIC(int virt_wire_setup)
void __cpuinit generic_processor_info(int apicid, int version)
{
- int cpu;
+ int cpu, max = nr_cpu_ids;
+ bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
+ phys_cpu_present_map);
+
+ /*
+ * If the boot cpu has not been detected yet, then only allow up to
+ * nr_cpu_ids - 1 processors and keep one slot free for the boot cpu
+ */
+ if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
+ apicid != boot_cpu_physical_apicid) {
+ int thiscpu = max + disabled_cpus - 1;
+
+ pr_warning(
+ "ACPI: NR_CPUS/possible_cpus limit of %i almost"
+ " reached. Keeping one slot for boot cpu."
+ " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+
+ disabled_cpus++;
+ return;
+ }
if (num_processors >= nr_cpu_ids) {
- int max = nr_cpu_ids;
int thiscpu = max + disabled_cpus;
pr_warning(
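
The check added to generic_processor_info() reserves the last CPU slot for the boot processor when firmware happens to enumerate it late. A small sketch of the rule, reduced to plain integers (the names and the simple "seen" flag are illustrative; the kernel tests the physid map instead):

#include <stdbool.h>
#include <stdio.h>

static int nr_cpu_ids = 4;	/* assumed CONFIG_NR_CPUS-style limit */
static int num_processors;
static int disabled_cpus;

/* Drop this apicid if accepting it would leave no slot for the boot CPU. */
static bool keep_slot_for_boot_cpu(int apicid, int boot_apicid, bool boot_seen)
{
	if (!boot_seen && num_processors >= nr_cpu_ids - 1 &&
	    apicid != boot_apicid) {
		disabled_cpus++;
		return true;
	}
	return false;
}

int main(void)
{
	int boot_apicid = 6;			/* BSP happens to be listed last */
	int apicids[] = { 0, 2, 4, 8, 6 };	/* five entries, only four slots */

	for (int i = 0; i < 5; i++) {
		if (keep_slot_for_boot_cpu(apicids[i], boot_apicid, false)) {
			printf("apicid %d ignored, slot kept for boot cpu\n",
			       apicids[i]);
			continue;
		}
		num_processors++;
		printf("apicid %d registered (now %d processors)\n",
		       apicids[i], num_processors);
	}
	return 0;
}

Here apicid 8 is dropped so that apicid 6, the boot CPU, still fits in the fourth slot.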
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 525514cf33c3..46674fbb62ba 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -62,6 +62,8 @@ static void __init check_fpu(void)
return;
}
+ kernel_fpu_begin();
+
/*
* trap_init() enabled FXSR and company _before_ testing for FP
* problems here.
@@ -80,6 +82,8 @@ static void __init check_fpu(void)
: "=m" (*&fdiv_bug)
: "m" (*&x), "m" (*&y));
+ kernel_fpu_end();
+
boot_cpu_data.fdiv_bug = fdiv_bug;
if (boot_cpu_data.fdiv_bug)
printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
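
The kernel_fpu_begin()/kernel_fpu_end() pair added here brackets the same FDIV probe the function has always done. The probe itself is the classic Pentium divider check; a standalone sketch of it follows, built as an ordinary userspace program (check_fpu() uses inline x87 asm and integer rounding instead, precisely to avoid compiler interference):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Known Pentium FDIV-bug operands; volatile blocks constant folding. */
	volatile double x = 4195835.0;
	volatile double y = 3145727.0;
	long residue;

	/* On a correct divider this rounds to 0; the buggy one leaves ~256. */
	residue = lround(x - (x / y) * y);

	if (residue)
		printf("Hmm, FPU with FDIV bug (residue %ld)\n", residue);
	else
		printf("FPU divider looks fine\n");
	return 0;
}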
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 8095f8611f8a..755f64fb0743 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -32,11 +32,11 @@
*/
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
- &x86_hyper_vmware,
- &x86_hyper_ms_hyperv,
#ifdef CONFIG_XEN_PVHVM
&x86_hyper_xen_hvm,
#endif
+ &x86_hyper_vmware,
+ &x86_hyper_ms_hyperv,
};
const struct hypervisor_x86 *x86_hyper;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 33c07b0b122e..a9c2116001d6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -51,6 +51,15 @@ static int parse_no_kvmapf(char *arg)
early_param("no-kvmapf", parse_no_kvmapf);
+static int steal_acc = 1;
+static int parse_no_stealacc(char *arg)
+{
+ steal_acc = 0;
+ return 0;
+}
+
+early_param("no-steal-acc", parse_no_stealacc);
+
struct kvm_para_state {
u8 mmu_queue[MMU_QUEUE_SIZE];
int mmu_queue_len;
@@ -58,6 +67,8 @@ struct kvm_para_state {
static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
+static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+static int has_steal_clock = 0;
static struct kvm_para_state *kvm_para_state(void)
{
@@ -441,6 +452,21 @@ static void __init paravirt_ops_setup(void)
#endif
}
+static void kvm_register_steal_time(void)
+{
+ int cpu = smp_processor_id();
+ struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
+
+ if (!has_steal_clock)
+ return;
+
+ memset(st, 0, sizeof(*st));
+
+ wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
+ printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
+ cpu, __pa(st));
+}
+
void __cpuinit kvm_guest_cpu_init(void)
{
if (!kvm_para_available())
@@ -457,6 +483,9 @@ void __cpuinit kvm_guest_cpu_init(void)
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
smp_processor_id());
}
+
+ if (has_steal_clock)
+ kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void *unused)
@@ -483,6 +512,31 @@ static struct notifier_block kvm_pv_reboot_nb = {
.notifier_call = kvm_pv_reboot_notify,
};
+static u64 kvm_steal_clock(int cpu)
+{
+ u64 steal;
+ struct kvm_steal_time *src;
+ int version;
+
+ src = &per_cpu(steal_time, cpu);
+ do {
+ version = src->version;
+ rmb();
+ steal = src->steal;
+ rmb();
+ } while ((version & 1) || (version != src->version));
+
+ return steal;
+}
+
+void kvm_disable_steal_time(void)
+{
+ if (!has_steal_clock)
+ return;
+
+ wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+}
+
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
@@ -500,6 +554,7 @@ static void __cpuinit kvm_guest_cpu_online(void *dummy)
static void kvm_guest_cpu_offline(void *dummy)
{
+ kvm_disable_steal_time();
kvm_pv_disable_apf(NULL);
apf_task_wake_all();
}
@@ -548,6 +603,11 @@ void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
x86_init.irqs.trap_init = kvm_apf_trap_init;
+ if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ has_steal_clock = 1;
+ pv_time_ops.steal_clock = kvm_steal_clock;
+ }
+
#ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
register_cpu_notifier(&kvm_cpu_notifier);
@@ -555,3 +615,15 @@ void __init kvm_guest_init(void)
kvm_guest_cpu_init();
#endif
}
+
+static __init int activate_jump_labels(void)
+{
+ if (has_steal_clock) {
+ jump_label_inc(&paravirt_steal_enabled);
+ if (steal_acc)
+ jump_label_inc(&paravirt_steal_rq_enabled);
+ }
+
+ return 0;
+}
+arch_initcall(activate_jump_labels);
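
kvm_steal_clock() above relies on the usual even/odd version handshake: the host bumps the version to an odd value before touching the record and to an even value afterwards, so the guest retries whenever it sees an odd or changed version. A minimal single-file sketch of that protocol (the real concurrent writer and the rmb() pairing are reduced to comments here):

#include <stdint.h>
#include <stdio.h>

struct steal_time_sketch {
	volatile uint32_t version;	/* odd while the writer is mid-update */
	volatile uint64_t steal;	/* nanoseconds of stolen time */
};

static struct steal_time_sketch st;

/* Host side: bump to odd, update the payload, bump back to even. */
static void host_update(uint64_t new_steal)
{
	st.version++;		/* now odd: readers must retry */
	st.steal = new_steal;
	st.version++;		/* even again: snapshot is consistent */
}

/* Guest side: retry until an even, unchanged version brackets the read. */
static uint64_t read_steal(void)
{
	uint32_t version;
	uint64_t steal;

	do {
		version = st.version;	/* kernel: rmb() after this load */
		steal = st.steal;	/* kernel: rmb() after this load */
	} while ((version & 1) || (version != st.version));

	return steal;
}

int main(void)
{
	host_update(123456789);
	printf("steal = %llu ns\n", (unsigned long long)read_steal());
	return 0;
}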
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 6389a6bca11b..c1a0188e29ae 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -160,6 +160,7 @@ static void __cpuinit kvm_setup_secondary_clock(void)
static void kvm_crash_shutdown(struct pt_regs *regs)
{
native_write_msr(msr_kvm_system_time, 0, 0);
+ kvm_disable_steal_time();
native_machine_crash_shutdown(regs);
}
#endif
@@ -167,6 +168,7 @@ static void kvm_crash_shutdown(struct pt_regs *regs)
static void kvm_shutdown(void)
{
native_write_msr(msr_kvm_system_time, 0, 0);
+ kvm_disable_steal_time();
native_machine_shutdown();
}
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 52f256f2cc81..925179f871de 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -45,21 +45,6 @@ void *module_alloc(unsigned long size)
-1, __builtin_return_address(0));
}
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
@@ -100,17 +85,6 @@ int apply_relocate(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
#else /*X86_64*/
int apply_relocate_add(Elf64_Shdr *sechdrs,
const char *strtab,
@@ -181,17 +155,6 @@ overflow:
me->name);
return -ENOEXEC;
}
-
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "non add relocation not supported\n");
- return -ENOSYS;
-}
-
#endif
int module_finalize(const Elf_Ehdr *hdr,
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 869e1aeeb71b..613a7931ecc1 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -202,6 +202,14 @@ static void native_flush_tlb_single(unsigned long addr)
__native_flush_tlb_single(addr);
}
+struct jump_label_key paravirt_steal_enabled;
+struct jump_label_key paravirt_steal_rq_enabled;
+
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);
@@ -307,6 +315,7 @@ struct pv_init_ops pv_init_ops = {
struct pv_time_ops pv_time_ops = {
.sched_clock = native_sched_clock,
+ .steal_clock = native_steal_clock,
};
struct pv_irq_ops pv_irq_ops = {
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 8bbe8c56916d..b78643d0f9a5 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -10,7 +10,7 @@
static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
- u8 config, rev;
+ u8 config;
u16 word;
/* BIOS may enable hardware IRQ balancing for
@@ -18,8 +18,7 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
* based platforms.
* Disable SW irqbalance/affinity on those platforms.
*/
- pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
- if (rev > 0x9)
+ if (dev->revision > 0x9)
return;
/* enable access to config space*/
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 41235531b11c..36818f8ec2be 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -97,6 +97,8 @@ relocate_kernel:
ret
identity_mapped:
+ /* set return address to 0 if not preserving context */
+ pushl $0
/* store the start address on the stack */
pushl %edx
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 4de8f5b3d476..7a6f3b3be3cf 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -100,6 +100,8 @@ relocate_kernel:
ret
identity_mapped:
+ /* set return address to 0 if not preserving context */
+ pushq $0
/* store the start address on the stack */
pushq %rdx
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 56c633a5db72..db483369f10b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -5,7 +5,6 @@
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
-#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
@@ -800,27 +799,6 @@ void mark_tsc_unstable(char *reason)
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
- d->ident);
- tsc_unstable = 1;
- return 0;
-}
-
-/* List of systems that have known TSC problems */
-static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
- {
- .callback = dmi_mark_tsc_unstable,
- .ident = "IBM Thinkpad 380XD",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
- DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
- },
- },
- {}
-};
-
static void __init check_system_tsc_reliable(void)
{
#ifdef CONFIG_MGEODE_LX
@@ -1010,8 +988,6 @@ void __init tsc_init(void)
lpj_fine = lpj;
use_tsc_delay();
- /* Check and install the TSC clocksource */
- dmi_check_system(bad_tsc_dmi_table);
if (unsynchronized_tsc())
mark_tsc_unstable("TSCs unsynchronized");
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 50f63648ce1b..988724b236b6 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -31,6 +31,7 @@ config KVM
select KVM_ASYNC_PF
select USER_RETURN_NOTIFIER
select KVM_MMIO
+ select TASK_DELAY_ACCT
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -76,6 +77,5 @@ config KVM_MMU_AUDIT
# the virtualization menu.
source drivers/vhost/Kconfig
source drivers/lguest/Kconfig
-source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index adc98675cda0..6f08bc940fa8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -407,76 +407,59 @@ struct gprefix {
} \
} while (0)
-/* Fetch next part of the instruction being emulated. */
-#define insn_fetch(_type, _size, _eip) \
-({ unsigned long _x; \
- rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
- if (rc != X86EMUL_CONTINUE) \
- goto done; \
- (_eip) += (_size); \
- (_type)_x; \
-})
-
-#define insn_fetch_arr(_arr, _size, _eip) \
-({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
- if (rc != X86EMUL_CONTINUE) \
- goto done; \
- (_eip) += (_size); \
-})
-
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
{
struct x86_instruction_info info = {
.intercept = intercept,
- .rep_prefix = ctxt->decode.rep_prefix,
- .modrm_mod = ctxt->decode.modrm_mod,
- .modrm_reg = ctxt->decode.modrm_reg,
- .modrm_rm = ctxt->decode.modrm_rm,
- .src_val = ctxt->decode.src.val64,
- .src_bytes = ctxt->decode.src.bytes,
- .dst_bytes = ctxt->decode.dst.bytes,
- .ad_bytes = ctxt->decode.ad_bytes,
+ .rep_prefix = ctxt->rep_prefix,
+ .modrm_mod = ctxt->modrm_mod,
+ .modrm_reg = ctxt->modrm_reg,
+ .modrm_rm = ctxt->modrm_rm,
+ .src_val = ctxt->src.val64,
+ .src_bytes = ctxt->src.bytes,
+ .dst_bytes = ctxt->dst.bytes,
+ .ad_bytes = ctxt->ad_bytes,
.next_rip = ctxt->eip,
};
return ctxt->ops->intercept(ctxt, &info, stage);
}
-static inline unsigned long ad_mask(struct decode_cache *c)
+static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
- return (1UL << (c->ad_bytes << 3)) - 1;
+ return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
-address_mask(struct decode_cache *c, unsigned long reg)
+address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
- if (c->ad_bytes == sizeof(unsigned long))
+ if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
- return reg & ad_mask(c);
+ return reg & ad_mask(ctxt);
}
static inline unsigned long
-register_address(struct decode_cache *c, unsigned long reg)
+register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
- return address_mask(c, reg);
+ return address_mask(ctxt, reg);
}
static inline void
-register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
+register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
- if (c->ad_bytes == sizeof(unsigned long))
+ if (ctxt->ad_bytes == sizeof(unsigned long))
*reg += inc;
else
- *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
+ *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}
-static inline void jmp_rel(struct decode_cache *c, int rel)
+static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
- register_address_increment(c, &c->eip, rel);
+ register_address_increment(ctxt, &ctxt->_eip, rel);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
@@ -486,28 +469,26 @@ static u32 desc_limit_scaled(struct desc_struct *desc)
return desc->g ? (limit << 12) | 0xfff : limit;
}
-static void set_seg_override(struct decode_cache *c, int seg)
+static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
- c->has_seg_override = true;
- c->seg_override = seg;
+ ctxt->has_seg_override = true;
+ ctxt->seg_override = seg;
}
-static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int seg)
+static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;
- return ops->get_cached_segment_base(ctxt, seg);
+ return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
-static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
- struct decode_cache *c)
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
- if (!c->has_seg_override)
+ if (!ctxt->has_seg_override)
return 0;
- return c->seg_override;
+ return ctxt->seg_override;
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
@@ -579,7 +560,6 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
unsigned size, bool write, bool fetch,
ulong *linear)
{
- struct decode_cache *c = &ctxt->decode;
struct desc_struct desc;
bool usable;
ulong la;
@@ -587,7 +567,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
u16 sel;
unsigned cpl, rpl;
- la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+ la = seg_base(ctxt, addr.seg) + addr.ea;
switch (ctxt->mode) {
case X86EMUL_MODE_REAL:
break;
@@ -637,7 +617,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
- if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
+ if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
la &= (u32)-1;
*linear = la;
return X86EMUL_CONTINUE;
@@ -671,11 +651,10 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
-static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
+static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt,
unsigned long eip, u8 *dest)
{
- struct fetch_cache *fc = &ctxt->decode.fetch;
+ struct fetch_cache *fc = &ctxt->fetch;
int rc;
int size, cur_size;
@@ -687,8 +666,8 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
rc = __linearize(ctxt, addr, size, false, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = ops->fetch(ctxt, linear, fc->data + cur_size,
- size, &ctxt->exception);
+ rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
+ size, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
fc->end += size;
@@ -698,7 +677,6 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
unsigned long eip, void *dest, unsigned size)
{
int rc;
@@ -707,13 +685,30 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
if (eip + size - ctxt->eip > 15)
return X86EMUL_UNHANDLEABLE;
while (size--) {
- rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
+ rc = do_insn_fetch_byte(ctxt, eip++, dest++);
if (rc != X86EMUL_CONTINUE)
return rc;
}
return X86EMUL_CONTINUE;
}
+/* Fetch next part of the instruction being emulated. */
+#define insn_fetch(_type, _size, _eip) \
+({ unsigned long _x; \
+ rc = do_insn_fetch(ctxt, (_eip), &_x, (_size)); \
+ if (rc != X86EMUL_CONTINUE) \
+ goto done; \
+ (_eip) += (_size); \
+ (_type)_x; \
+})
+
+#define insn_fetch_arr(_arr, _size, _eip) \
+({ rc = do_insn_fetch(ctxt, (_eip), _arr, (_size)); \
+ if (rc != X86EMUL_CONTINUE) \
+ goto done; \
+ (_eip) += (_size); \
+})
+
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
* pointer into the block that addresses the relevant register.
@@ -857,16 +852,15 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op,
- struct decode_cache *c,
int inhibit_bytereg)
{
- unsigned reg = c->modrm_reg;
- int highbyte_regs = c->rex_prefix == 0;
+ unsigned reg = ctxt->modrm_reg;
+ int highbyte_regs = ctxt->rex_prefix == 0;
- if (!(c->d & ModRM))
- reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
+ if (!(ctxt->d & ModRM))
+ reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
- if (c->d & Sse) {
+ if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
@@ -875,49 +869,47 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
}
op->type = OP_REG;
- if ((c->d & ByteOp) && !inhibit_bytereg) {
- op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
+ if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
+ op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
op->bytes = 1;
} else {
- op->addr.reg = decode_register(reg, c->regs, 0);
- op->bytes = c->op_bytes;
+ op->addr.reg = decode_register(reg, ctxt->regs, 0);
+ op->bytes = ctxt->op_bytes;
}
fetch_register_operand(op);
op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct operand *op)
{
- struct decode_cache *c = &ctxt->decode;
u8 sib;
int index_reg = 0, base_reg = 0, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
- if (c->rex_prefix) {
- c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
- index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
- c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */
+ if (ctxt->rex_prefix) {
+ ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */
+ index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
+ ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
}
- c->modrm = insn_fetch(u8, 1, c->eip);
- c->modrm_mod |= (c->modrm & 0xc0) >> 6;
- c->modrm_reg |= (c->modrm & 0x38) >> 3;
- c->modrm_rm |= (c->modrm & 0x07);
- c->modrm_seg = VCPU_SREG_DS;
+ ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+ ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
+ ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
+ ctxt->modrm_rm |= (ctxt->modrm & 0x07);
+ ctxt->modrm_seg = VCPU_SREG_DS;
- if (c->modrm_mod == 3) {
+ if (ctxt->modrm_mod == 3) {
op->type = OP_REG;
- op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- op->addr.reg = decode_register(c->modrm_rm,
- c->regs, c->d & ByteOp);
- if (c->d & Sse) {
+ op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ op->addr.reg = decode_register(ctxt->modrm_rm,
+ ctxt->regs, ctxt->d & ByteOp);
+ if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
- op->addr.xmm = c->modrm_rm;
- read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
+ op->addr.xmm = ctxt->modrm_rm;
+ read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
fetch_register_operand(op);
@@ -926,26 +918,26 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
op->type = OP_MEM;
- if (c->ad_bytes == 2) {
- unsigned bx = c->regs[VCPU_REGS_RBX];
- unsigned bp = c->regs[VCPU_REGS_RBP];
- unsigned si = c->regs[VCPU_REGS_RSI];
- unsigned di = c->regs[VCPU_REGS_RDI];
+ if (ctxt->ad_bytes == 2) {
+ unsigned bx = ctxt->regs[VCPU_REGS_RBX];
+ unsigned bp = ctxt->regs[VCPU_REGS_RBP];
+ unsigned si = ctxt->regs[VCPU_REGS_RSI];
+ unsigned di = ctxt->regs[VCPU_REGS_RDI];
/* 16-bit ModR/M decode. */
- switch (c->modrm_mod) {
+ switch (ctxt->modrm_mod) {
case 0:
- if (c->modrm_rm == 6)
- modrm_ea += insn_fetch(u16, 2, c->eip);
+ if (ctxt->modrm_rm == 6)
+ modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
break;
case 1:
- modrm_ea += insn_fetch(s8, 1, c->eip);
+ modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
break;
case 2:
- modrm_ea += insn_fetch(u16, 2, c->eip);
+ modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
break;
}
- switch (c->modrm_rm) {
+ switch (ctxt->modrm_rm) {
case 0:
modrm_ea += bx + si;
break;
@@ -965,46 +957,46 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
modrm_ea += di;
break;
case 6:
- if (c->modrm_mod != 0)
+ if (ctxt->modrm_mod != 0)
modrm_ea += bp;
break;
case 7:
modrm_ea += bx;
break;
}
- if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
- (c->modrm_rm == 6 && c->modrm_mod != 0))
- c->modrm_seg = VCPU_SREG_SS;
+ if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
+ (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
+ ctxt->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
} else {
/* 32/64-bit ModR/M decode. */
- if ((c->modrm_rm & 7) == 4) {
- sib = insn_fetch(u8, 1, c->eip);
+ if ((ctxt->modrm_rm & 7) == 4) {
+ sib = insn_fetch(u8, 1, ctxt->_eip);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
- if ((base_reg & 7) == 5 && c->modrm_mod == 0)
- modrm_ea += insn_fetch(s32, 4, c->eip);
+ if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
+ modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
else
- modrm_ea += c->regs[base_reg];
+ modrm_ea += ctxt->regs[base_reg];
if (index_reg != 4)
- modrm_ea += c->regs[index_reg] << scale;
- } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
+ modrm_ea += ctxt->regs[index_reg] << scale;
+ } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
if (ctxt->mode == X86EMUL_MODE_PROT64)
- c->rip_relative = 1;
+ ctxt->rip_relative = 1;
} else
- modrm_ea += c->regs[c->modrm_rm];
- switch (c->modrm_mod) {
+ modrm_ea += ctxt->regs[ctxt->modrm_rm];
+ switch (ctxt->modrm_mod) {
case 0:
- if (c->modrm_rm == 5)
- modrm_ea += insn_fetch(s32, 4, c->eip);
+ if (ctxt->modrm_rm == 5)
+ modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
break;
case 1:
- modrm_ea += insn_fetch(s8, 1, c->eip);
+ modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
break;
case 2:
- modrm_ea += insn_fetch(s32, 4, c->eip);
+ modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
break;
}
}
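
The 16-bit branch above encodes the classic 8086 ModR/M addressing table: r/m selects a BX/BP/SI/DI combination, mod selects the displacement size, and the BP-based forms default to SS. A small sketch of that table with one worked example (all register values are made up):

#include <stdint.h>
#include <stdio.h>

/* 16-bit effective address: base combination from r/m, plus displacement. */
static uint16_t ea_16bit(uint8_t mod, uint8_t rm, int16_t disp,
			 uint16_t bx, uint16_t bp, uint16_t si, uint16_t di)
{
	uint16_t base = 0;

	switch (rm) {
	case 0: base = bx + si; break;
	case 1: base = bx + di; break;
	case 2: base = bp + si; break;
	case 3: base = bp + di; break;
	case 4: base = si; break;
	case 5: base = di; break;
	case 6: base = (mod == 0) ? 0 : bp; break;	/* mod==0: bare disp16 */
	case 7: base = bx; break;
	}
	return base + disp;	/* wraps at 16 bits, like the (u16) cast above */
}

int main(void)
{
	/* ModR/M byte 0x46: mod=1, reg=0, rm=6 -> [bp + disp8], segment SS. */
	uint8_t modrm = 0x46;
	int8_t disp8 = 0x10;

	printf("EA = 0x%04x\n",
	       ea_16bit(modrm >> 6, modrm & 7, disp8,
			0x1234 /* bx */, 0x2000 /* bp */,
			0x0040 /* si */, 0x0080 /* di */));
	return 0;
}

With BP = 0x2000 and an 8-bit displacement of 0x10 the effective address is 0x2010.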
@@ -1014,53 +1006,50 @@ done:
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct operand *op)
{
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
op->type = OP_MEM;
- switch (c->ad_bytes) {
+ switch (ctxt->ad_bytes) {
case 2:
- op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
+ op->addr.mem.ea = insn_fetch(u16, 2, ctxt->_eip);
break;
case 4:
- op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
+ op->addr.mem.ea = insn_fetch(u32, 4, ctxt->_eip);
break;
case 8:
- op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
+ op->addr.mem.ea = insn_fetch(u64, 8, ctxt->_eip);
break;
}
done:
return rc;
}
-static void fetch_bit_operand(struct decode_cache *c)
+static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
long sv = 0, mask;
- if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
- mask = ~(c->dst.bytes * 8 - 1);
+ if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
+ mask = ~(ctxt->dst.bytes * 8 - 1);
- if (c->src.bytes == 2)
- sv = (s16)c->src.val & (s16)mask;
- else if (c->src.bytes == 4)
- sv = (s32)c->src.val & (s32)mask;
+ if (ctxt->src.bytes == 2)
+ sv = (s16)ctxt->src.val & (s16)mask;
+ else if (ctxt->src.bytes == 4)
+ sv = (s32)ctxt->src.val & (s32)mask;
- c->dst.addr.mem.ea += (sv >> 3);
+ ctxt->dst.addr.mem.ea += (sv >> 3);
}
/* only subword offset */
- c->src.val &= (c->dst.bytes << 3) - 1;
+ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
static int read_emulated(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
unsigned long addr, void *dest, unsigned size)
{
int rc;
- struct read_cache *mc = &ctxt->decode.mem_read;
+ struct read_cache *mc = &ctxt->mem_read;
while (size) {
int n = min(size, 8u);
@@ -1068,8 +1057,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
if (mc->pos < mc->end)
goto read_cached;
- rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
- &ctxt->exception);
+ rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
+ &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
mc->end += n;
@@ -1094,7 +1083,7 @@ static int segmented_read(struct x86_emulate_ctxt *ctxt,
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- return read_emulated(ctxt, ctxt->ops, linear, data, size);
+ return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
@@ -1128,26 +1117,24 @@ static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
unsigned int size, unsigned short port,
void *dest)
{
- struct read_cache *rc = &ctxt->decode.io_read;
+ struct read_cache *rc = &ctxt->io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
- struct decode_cache *c = &ctxt->decode;
unsigned int in_page, n;
- unsigned int count = c->rep_prefix ?
- address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
+ unsigned int count = ctxt->rep_prefix ?
+ address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
in_page = (ctxt->eflags & EFLG_DF) ?
- offset_in_page(c->regs[VCPU_REGS_RDI]) :
- PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
+ offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
+ PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
count);
if (n == 0)
n = 1;
rc->pos = rc->end = 0;
- if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n))
+ if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
return 0;
rc->end = n * size;
}
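
For a rep-prefixed IN, pio_in_emulated() reads ahead: the burst size is the repeat count clamped to both the room left in the destination page and the size of the internal bounce buffer. A sketch of that sizing with illustrative numbers:

#include <stdio.h>

#define PAGE_SIZE	4096u
#define RC_DATA_SIZE	1024u	/* stands in for sizeof(rc->data) */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int size = 2;		/* 16-bit port reads */
	unsigned int count = 5000;	/* RCX for the rep prefix */
	unsigned long rdi = 0x2010;	/* destination, direction flag clear */
	unsigned int in_page, n;

	/* room left in the destination page when moving forward */
	in_page = PAGE_SIZE - (rdi & (PAGE_SIZE - 1));

	/* elements per burst: page room and buffer room, capped by RCX */
	n = min_u(min_u(in_page, RC_DATA_SIZE) / size, count);
	if (n == 0)
		n = 1;

	printf("read ahead %u elements (%u bytes) per exit\n", n, n * size);
	return 0;
}

Here the 1 KiB buffer is the limiting factor, so 512 two-byte elements are fetched per exit.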
@@ -1158,9 +1145,10 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 selector, struct desc_ptr *dt)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
+
if (selector & 1 << 2) {
struct desc_struct desc;
u16 sel;
@@ -1177,48 +1165,42 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 selector, struct desc_struct *desc)
{
struct desc_ptr dt;
u16 index = selector >> 3;
- int ret;
ulong addr;
- get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+ get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
- addr = dt.address + index * 8;
- ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
- return ret;
+ addr = dt.address + index * 8;
+ return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+ &ctxt->exception);
}
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 selector, struct desc_struct *desc)
{
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
- int ret;
- get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+ get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
- ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
-
- return ret;
+ return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+ &ctxt->exception);
}
/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 selector, int seg)
{
struct desc_struct seg_desc;
@@ -1253,7 +1235,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (null_selector) /* for NULL selector skip all following checks */
goto load;
- ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ ret = read_segment_descriptor(ctxt, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -1271,7 +1253,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
rpl = selector & 3;
dpl = seg_desc.dpl;
- cpl = ops->cpl(ctxt);
+ cpl = ctxt->ops->cpl(ctxt);
switch (seg) {
case VCPU_SREG_SS:
@@ -1322,12 +1304,12 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (seg_desc.s) {
/* mark segment as accessed */
seg_desc.type |= 1;
- ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ ret = write_segment_descriptor(ctxt, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
}
load:
- ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
+ ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
return X86EMUL_CONTINUE;
exception:
emulate_exception(ctxt, err_vec, err_code, true);
@@ -1356,29 +1338,28 @@ static void write_register_operand(struct operand *op)
static int writeback(struct x86_emulate_ctxt *ctxt)
{
int rc;
- struct decode_cache *c = &ctxt->decode;
- switch (c->dst.type) {
+ switch (ctxt->dst.type) {
case OP_REG:
- write_register_operand(&c->dst);
+ write_register_operand(&ctxt->dst);
break;
case OP_MEM:
- if (c->lock_prefix)
+ if (ctxt->lock_prefix)
rc = segmented_cmpxchg(ctxt,
- c->dst.addr.mem,
- &c->dst.orig_val,
- &c->dst.val,
- c->dst.bytes);
+ ctxt->dst.addr.mem,
+ &ctxt->dst.orig_val,
+ &ctxt->dst.val,
+ ctxt->dst.bytes);
else
rc = segmented_write(ctxt,
- c->dst.addr.mem,
- &c->dst.val,
- c->dst.bytes);
+ ctxt->dst.addr.mem,
+ &ctxt->dst.val,
+ ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
break;
case OP_XMM:
- write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
+ write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
break;
case OP_NONE:
/* no writeback */
@@ -1391,50 +1372,45 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
static int em_push(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
struct segmented_address addr;
- register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
- addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
+ addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
addr.seg = VCPU_SREG_SS;
/* Disable writeback. */
- c->dst.type = OP_NONE;
- return segmented_write(ctxt, addr, &c->src.val, c->op_bytes);
+ ctxt->dst.type = OP_NONE;
+ return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
struct segmented_address addr;
- addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+ addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
return rc;
}
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- return emulate_pop(ctxt, &c->dst.val, c->op_bytes);
+ return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
- void *dest, int len)
+ void *dest, int len)
{
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- int cpl = ops->cpl(ctxt);
+ int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
@@ -1470,49 +1446,41 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->dst.type = OP_REG;
- c->dst.addr.reg = &ctxt->eflags;
- c->dst.bytes = c->op_bytes;
- return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = &ctxt->eflags;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
-static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int seg)
+static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->src.val = get_segment_selector(ctxt, seg);
+ ctxt->src.val = get_segment_selector(ctxt, seg);
return em_push(ctxt);
}
-static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int seg)
+static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
- struct decode_cache *c = &ctxt->decode;
unsigned long selector;
int rc;
- rc = emulate_pop(ctxt, &selector, c->op_bytes);
+ rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
+ rc = load_segment_descriptor(ctxt, (u16)selector, seg);
return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- unsigned long old_esp = c->regs[VCPU_REGS_RSP];
+ unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
(reg == VCPU_REGS_RSP) ?
- (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
+ (ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
@@ -1526,26 +1494,23 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->src.val = (unsigned long)ctxt->eflags;
+ ctxt->src.val = (unsigned long)ctxt->eflags;
return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
- register_address_increment(c, &c->regs[VCPU_REGS_RSP],
- c->op_bytes);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
+ ctxt->op_bytes);
--reg;
}
- rc = emulate_pop(ctxt, &c->regs[reg], c->op_bytes);
+ rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
--reg;
@@ -1553,10 +1518,9 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
return rc;
}
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int irq)
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
- struct decode_cache *c = &ctxt->decode;
+ struct x86_emulate_ops *ops = ctxt->ops;
int rc;
struct desc_ptr dt;
gva_t cs_addr;
@@ -1564,19 +1528,19 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
u16 cs, eip;
/* TODO: Add limit checks */
- c->src.val = ctxt->eflags;
+ ctxt->src.val = ctxt->eflags;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
- c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
+ ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->src.val = c->eip;
+ ctxt->src.val = ctxt->_eip;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -1594,21 +1558,20 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->eip = eip;
+ ctxt->_eip = eip;
return rc;
}
-static int emulate_int(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int irq)
+static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
- return emulate_int_real(ctxt, ops, irq);
+ return emulate_int_real(ctxt, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
@@ -1619,10 +1582,8 @@ static int emulate_int(struct x86_emulate_ctxt *ctxt,
}
}
-static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
@@ -1634,7 +1595,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
/* TODO: Add stack limit check */
- rc = emulate_pop(ctxt, &temp_eip, c->op_bytes);
+ rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -1642,27 +1603,27 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
- rc = emulate_pop(ctxt, &cs, c->op_bytes);
+ rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = emulate_pop(ctxt, &temp_eflags, c->op_bytes);
+ rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->eip = temp_eip;
+ ctxt->_eip = temp_eip;
- if (c->op_bytes == 4)
+ if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
- else if (c->op_bytes == 2) {
+ else if (ctxt->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
}
@@ -1673,12 +1634,11 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
return rc;
}
-static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops* ops)
+static int em_iret(struct x86_emulate_ctxt *ctxt)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
- return emulate_iret_real(ctxt, ops);
+ return emulate_iret_real(ctxt);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
@@ -1691,53 +1651,49 @@ static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
unsigned short sel;
- memcpy(&sel, c->src.valptr + c->op_bytes, 2);
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
- rc = load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->eip = 0;
- memcpy(&c->eip, c->src.valptr, c->op_bytes);
+ ctxt->_eip = 0;
+ memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
return X86EMUL_CONTINUE;
}
static int em_grp1a(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- return emulate_pop(ctxt, &c->dst.val, c->dst.bytes);
+ return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
}
static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- switch (c->modrm_reg) {
+ switch (ctxt->modrm_reg) {
case 0: /* rol */
- emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("rol", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 1: /* ror */
- emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("ror", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 2: /* rcl */
- emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("rcl", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 3: /* rcr */
- emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("rcr", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 4: /* sal/shl */
case 6: /* sal/shl */
- emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("sal", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 5: /* shr */
- emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("shr", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 7: /* sar */
- emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcB("sar", ctxt->src, ctxt->dst, ctxt->eflags);
break;
}
return X86EMUL_CONTINUE;
@@ -1745,33 +1701,32 @@ static int em_grp2(struct x86_emulate_ctxt *ctxt)
static int em_grp3(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- unsigned long *rax = &c->regs[VCPU_REGS_RAX];
- unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
+ unsigned long *rax = &ctxt->regs[VCPU_REGS_RAX];
+ unsigned long *rdx = &ctxt->regs[VCPU_REGS_RDX];
u8 de = 0;
- switch (c->modrm_reg) {
+ switch (ctxt->modrm_reg) {
case 0 ... 1: /* test */
- emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 2: /* not */
- c->dst.val = ~c->dst.val;
+ ctxt->dst.val = ~ctxt->dst.val;
break;
case 3: /* neg */
- emulate_1op("neg", c->dst, ctxt->eflags);
+ emulate_1op("neg", ctxt->dst, ctxt->eflags);
break;
case 4: /* mul */
- emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
+ emulate_1op_rax_rdx("mul", ctxt->src, *rax, *rdx, ctxt->eflags);
break;
case 5: /* imul */
- emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
+ emulate_1op_rax_rdx("imul", ctxt->src, *rax, *rdx, ctxt->eflags);
break;
case 6: /* div */
- emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
+ emulate_1op_rax_rdx_ex("div", ctxt->src, *rax, *rdx,
ctxt->eflags, de);
break;
case 7: /* idiv */
- emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
+ emulate_1op_rax_rdx_ex("idiv", ctxt->src, *rax, *rdx,
ctxt->eflags, de);
break;
default:
@@ -1784,26 +1739,25 @@ static int em_grp3(struct x86_emulate_ctxt *ctxt)
static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
- switch (c->modrm_reg) {
+ switch (ctxt->modrm_reg) {
case 0: /* inc */
- emulate_1op("inc", c->dst, ctxt->eflags);
+ emulate_1op("inc", ctxt->dst, ctxt->eflags);
break;
case 1: /* dec */
- emulate_1op("dec", c->dst, ctxt->eflags);
+ emulate_1op("dec", ctxt->dst, ctxt->eflags);
break;
case 2: /* call near abs */ {
long int old_eip;
- old_eip = c->eip;
- c->eip = c->src.val;
- c->src.val = old_eip;
+ old_eip = ctxt->_eip;
+ ctxt->_eip = ctxt->src.val;
+ ctxt->src.val = old_eip;
rc = em_push(ctxt);
break;
}
case 4: /* jmp abs */
- c->eip = c->src.val;
+ ctxt->_eip = ctxt->src.val;
break;
case 5: /* jmp far */
rc = em_jmp_far(ctxt);
@@ -1817,68 +1771,70 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
static int em_grp9(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- u64 old = c->dst.orig_val64;
+ u64 old = ctxt->dst.orig_val64;
- if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
- ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
- c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
- c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
+ if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
+ ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
+ ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
+ ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
} else {
- c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
- (u32) c->regs[VCPU_REGS_RBX];
+ ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
+ (u32) ctxt->regs[VCPU_REGS_RBX];
ctxt->eflags |= EFLG_ZF;
}
return X86EMUL_CONTINUE;
}
-static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+static int em_ret(struct x86_emulate_ctxt *ctxt)
+{
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = &ctxt->_eip;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ return em_pop(ctxt);
+}
+
+static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
unsigned long cs;
- rc = emulate_pop(ctxt, &c->eip, c->op_bytes);
+ rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- if (c->op_bytes == 4)
- c->eip = (u32)c->eip;
- rc = emulate_pop(ctxt, &cs, c->op_bytes);
+ if (ctxt->op_bytes == 4)
+ ctxt->_eip = (u32)ctxt->_eip;
+ rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
return rc;
}
-static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, int seg)
+static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
{
- struct decode_cache *c = &ctxt->decode;
unsigned short sel;
int rc;
- memcpy(&sel, c->src.valptr + c->op_bytes, 2);
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
- rc = load_segment_descriptor(ctxt, ops, sel, seg);
+ rc = load_segment_descriptor(ctxt, sel, seg);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->dst.val = c->src.val;
+ ctxt->dst.val = ctxt->src.val;
return rc;
}
-static inline void
+static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops, struct desc_struct *cs,
- struct desc_struct *ss)
+ struct desc_struct *cs, struct desc_struct *ss)
{
u16 selector;
memset(cs, 0, sizeof(struct desc_struct));
- ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
+ ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
memset(ss, 0, sizeof(struct desc_struct));
cs->l = 0; /* will be adjusted later */
@@ -1901,10 +1857,9 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
ss->p = 1;
}
-static int
-emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
@@ -1916,7 +1871,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
- setup_syscalls_segments(ctxt, ops, &cs, &ss);
+ setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
@@ -1930,15 +1885,15 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
- c->regs[VCPU_REGS_RCX] = c->eip;
+ ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
- c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
+ ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
- c->eip = msr_data;
+ ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~(msr_data | EFLG_RF);
@@ -1946,7 +1901,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
- c->eip = (u32)msr_data;
+ ctxt->_eip = (u32)msr_data;
ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
}
@@ -1954,16 +1909,15 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
return X86EMUL_CONTINUE;
}
-static int
-emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+ ops->get_msr(ctxt, MSR_EFER, &efer);
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
@@ -1974,7 +1928,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
if (ctxt->mode == X86EMUL_MODE_PROT64)
return emulate_ud(ctxt);
- setup_syscalls_segments(ctxt, ops, &cs, &ss);
+ setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (ctxt->mode) {
@@ -2002,31 +1956,30 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
- c->eip = msr_data;
+ ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
- c->regs[VCPU_REGS_RSP] = msr_data;
+ ctxt->regs[VCPU_REGS_RSP] = msr_data;
return X86EMUL_CONTINUE;
}
-static int
-emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
int usermode;
- u16 cs_sel, ss_sel;
+ u16 cs_sel = 0, ss_sel = 0;
/* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_gp(ctxt, 0);
- setup_syscalls_segments(ctxt, ops, &cs, &ss);
+ setup_syscalls_segments(ctxt, &cs, &ss);
- if ((c->rex_prefix & 0x8) != 0x0)
+ if ((ctxt->rex_prefix & 0x8) != 0x0)
usermode = X86EMUL_MODE_PROT64;
else
usermode = X86EMUL_MODE_PROT32;
@@ -2056,14 +2009,13 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
- c->eip = c->regs[VCPU_REGS_RDX];
- c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
+ ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
+ ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
return X86EMUL_CONTINUE;
}
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops)
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
@@ -2071,13 +2023,13 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- return ops->cpl(ctxt) > iopl;
+ return ctxt->ops->cpl(ctxt) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 port, u16 len)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct tr_seg;
u32 base3;
int r;
@@ -2108,14 +2060,13 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 port, u16 len)
{
if (ctxt->perm_ok)
return true;
- if (emulator_bad_iopl(ctxt, ops))
- if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
+ if (emulator_bad_iopl(ctxt))
+ if (!emulator_io_port_access_allowed(ctxt, port, len))
return false;
ctxt->perm_ok = true;
@@ -2124,21 +2075,18 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct tss_segment_16 *tss)
{
- struct decode_cache *c = &ctxt->decode;
-
- tss->ip = c->eip;
+ tss->ip = ctxt->_eip;
tss->flag = ctxt->eflags;
- tss->ax = c->regs[VCPU_REGS_RAX];
- tss->cx = c->regs[VCPU_REGS_RCX];
- tss->dx = c->regs[VCPU_REGS_RDX];
- tss->bx = c->regs[VCPU_REGS_RBX];
- tss->sp = c->regs[VCPU_REGS_RSP];
- tss->bp = c->regs[VCPU_REGS_RBP];
- tss->si = c->regs[VCPU_REGS_RSI];
- tss->di = c->regs[VCPU_REGS_RDI];
+ tss->ax = ctxt->regs[VCPU_REGS_RAX];
+ tss->cx = ctxt->regs[VCPU_REGS_RCX];
+ tss->dx = ctxt->regs[VCPU_REGS_RDX];
+ tss->bx = ctxt->regs[VCPU_REGS_RBX];
+ tss->sp = ctxt->regs[VCPU_REGS_RSP];
+ tss->bp = ctxt->regs[VCPU_REGS_RBP];
+ tss->si = ctxt->regs[VCPU_REGS_RSI];
+ tss->di = ctxt->regs[VCPU_REGS_RDI];
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
@@ -2148,22 +2096,20 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct tss_segment_16 *tss)
{
- struct decode_cache *c = &ctxt->decode;
int ret;
- c->eip = tss->ip;
+ ctxt->_eip = tss->ip;
ctxt->eflags = tss->flag | 2;
- c->regs[VCPU_REGS_RAX] = tss->ax;
- c->regs[VCPU_REGS_RCX] = tss->cx;
- c->regs[VCPU_REGS_RDX] = tss->dx;
- c->regs[VCPU_REGS_RBX] = tss->bx;
- c->regs[VCPU_REGS_RSP] = tss->sp;
- c->regs[VCPU_REGS_RBP] = tss->bp;
- c->regs[VCPU_REGS_RSI] = tss->si;
- c->regs[VCPU_REGS_RDI] = tss->di;
+ ctxt->regs[VCPU_REGS_RAX] = tss->ax;
+ ctxt->regs[VCPU_REGS_RCX] = tss->cx;
+ ctxt->regs[VCPU_REGS_RDX] = tss->dx;
+ ctxt->regs[VCPU_REGS_RBX] = tss->bx;
+ ctxt->regs[VCPU_REGS_RSP] = tss->sp;
+ ctxt->regs[VCPU_REGS_RBP] = tss->bp;
+ ctxt->regs[VCPU_REGS_RSI] = tss->si;
+ ctxt->regs[VCPU_REGS_RDI] = tss->di;
/*
* SDM says that segment selectors are loaded before segment
@@ -2179,19 +2125,19 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
* Now load segment descriptors. If fault happens at this stage
* it is handled in a context of new task
*/
- ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+ ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2199,10 +2145,10 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
@@ -2213,7 +2159,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
/* FIXME: need to provide precise fault address */
return ret;
- save_state_to_tss16(ctxt, ops, &tss_seg);
+ save_state_to_tss16(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
@@ -2239,26 +2185,23 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
return ret;
}
- return load_state_from_tss16(ctxt, ops, &tss_seg);
+ return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct tss_segment_32 *tss)
{
- struct decode_cache *c = &ctxt->decode;
-
- tss->cr3 = ops->get_cr(ctxt, 3);
- tss->eip = c->eip;
+ tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
+ tss->eip = ctxt->_eip;
tss->eflags = ctxt->eflags;
- tss->eax = c->regs[VCPU_REGS_RAX];
- tss->ecx = c->regs[VCPU_REGS_RCX];
- tss->edx = c->regs[VCPU_REGS_RDX];
- tss->ebx = c->regs[VCPU_REGS_RBX];
- tss->esp = c->regs[VCPU_REGS_RSP];
- tss->ebp = c->regs[VCPU_REGS_RBP];
- tss->esi = c->regs[VCPU_REGS_RSI];
- tss->edi = c->regs[VCPU_REGS_RDI];
+ tss->eax = ctxt->regs[VCPU_REGS_RAX];
+ tss->ecx = ctxt->regs[VCPU_REGS_RCX];
+ tss->edx = ctxt->regs[VCPU_REGS_RDX];
+ tss->ebx = ctxt->regs[VCPU_REGS_RBX];
+ tss->esp = ctxt->regs[VCPU_REGS_RSP];
+ tss->ebp = ctxt->regs[VCPU_REGS_RBP];
+ tss->esi = ctxt->regs[VCPU_REGS_RSI];
+ tss->edi = ctxt->regs[VCPU_REGS_RDI];
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
@@ -2270,24 +2213,22 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
struct tss_segment_32 *tss)
{
- struct decode_cache *c = &ctxt->decode;
int ret;
- if (ops->set_cr(ctxt, 3, tss->cr3))
+ if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
- c->eip = tss->eip;
+ ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
- c->regs[VCPU_REGS_RAX] = tss->eax;
- c->regs[VCPU_REGS_RCX] = tss->ecx;
- c->regs[VCPU_REGS_RDX] = tss->edx;
- c->regs[VCPU_REGS_RBX] = tss->ebx;
- c->regs[VCPU_REGS_RSP] = tss->esp;
- c->regs[VCPU_REGS_RBP] = tss->ebp;
- c->regs[VCPU_REGS_RSI] = tss->esi;
- c->regs[VCPU_REGS_RDI] = tss->edi;
+ ctxt->regs[VCPU_REGS_RAX] = tss->eax;
+ ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
+ ctxt->regs[VCPU_REGS_RDX] = tss->edx;
+ ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
+ ctxt->regs[VCPU_REGS_RSP] = tss->esp;
+ ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
+ ctxt->regs[VCPU_REGS_RSI] = tss->esi;
+ ctxt->regs[VCPU_REGS_RDI] = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
@@ -2305,25 +2246,25 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
* Now load segment descriptors. If fault happens at this stage
* it is handled in a context of new task
*/
- ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+ ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+ ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+ ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2331,10 +2272,10 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
@@ -2345,7 +2286,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
/* FIXME: need to provide precise fault address */
return ret;
- save_state_to_tss32(ctxt, ops, &tss_seg);
+ save_state_to_tss32(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
@@ -2371,14 +2312,14 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
return ret;
}
- return load_state_from_tss32(ctxt, ops, &tss_seg);
+ return load_state_from_tss32(ctxt, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
u16 tss_selector, int reason,
bool has_error_code, u32 error_code)
{
+ struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
@@ -2388,10 +2329,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
/* FIXME: old_tss_base == ~0 ? */
- ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+ ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+ ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2413,8 +2354,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
- write_segment_descriptor(ctxt, ops, old_tss_sel,
- &curr_tss_desc);
+ write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
}
if (reason == TASK_SWITCH_IRET)
@@ -2426,10 +2366,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
old_tss_sel = 0xffff;
if (next_tss_desc.type & 8)
- ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
+ ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
else
- ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
+ ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2439,19 +2379,16 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
if (reason != TASK_SWITCH_IRET) {
next_tss_desc.type |= (1 << 1); /* set busy flag */
- write_segment_descriptor(ctxt, ops, tss_selector,
- &next_tss_desc);
+ write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
}
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
if (has_error_code) {
- struct decode_cache *c = &ctxt->decode;
-
- c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
- c->lock_prefix = 0;
- c->src.val = (unsigned long) error_code;
+ ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
+ ctxt->lock_prefix = 0;
+ ctxt->src.val = (unsigned long) error_code;
ret = em_push(ctxt);
}
@@ -2462,18 +2399,16 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int reason,
bool has_error_code, u32 error_code)
{
- struct x86_emulate_ops *ops = ctxt->ops;
- struct decode_cache *c = &ctxt->decode;
int rc;
- c->eip = ctxt->eip;
- c->dst.type = OP_NONE;
+ ctxt->_eip = ctxt->eip;
+ ctxt->dst.type = OP_NONE;
- rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+ rc = emulator_do_task_switch(ctxt, tss_selector, reason,
has_error_code, error_code);
if (rc == X86EMUL_CONTINUE)
- ctxt->eip = c->eip;
+ ctxt->eip = ctxt->_eip;
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
@@ -2481,22 +2416,20 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
int reg, struct operand *op)
{
- struct decode_cache *c = &ctxt->decode;
int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
- register_address_increment(c, &c->regs[reg], df * op->bytes);
- op->addr.mem.ea = register_address(c, c->regs[reg]);
+ register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
+ op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
op->addr.mem.seg = seg;
}
static int em_das(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
u8 al, old_al;
bool af, cf, old_cf;
cf = ctxt->eflags & X86_EFLAGS_CF;
- al = c->dst.val;
+ al = ctxt->dst.val;
old_al = al;
old_cf = cf;
@@ -2514,12 +2447,12 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
cf = true;
}
- c->dst.val = al;
+ ctxt->dst.val = al;
/* Set PF, ZF, SF */
- c->src.type = OP_IMM;
- c->src.val = 0;
- c->src.bytes = 1;
- emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
+ ctxt->src.type = OP_IMM;
+ ctxt->src.val = 0;
+ ctxt->src.bytes = 1;
+ emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
@@ -2530,175 +2463,189 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
u16 sel, old_cs;
ulong old_eip;
int rc;
old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
- old_eip = c->eip;
+ old_eip = ctxt->_eip;
- memcpy(&sel, c->src.valptr + c->op_bytes, 2);
- if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
+ memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+ if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
return X86EMUL_CONTINUE;
- c->eip = 0;
- memcpy(&c->eip, c->src.valptr, c->op_bytes);
+ ctxt->_eip = 0;
+ memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
- c->src.val = old_cs;
+ ctxt->src.val = old_cs;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
- c->src.val = old_eip;
+ ctxt->src.val = old_eip;
return em_push(ctxt);
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
- c->dst.type = OP_REG;
- c->dst.addr.reg = &c->eip;
- c->dst.bytes = c->op_bytes;
- rc = emulate_pop(ctxt, &c->dst.val, c->op_bytes);
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = &ctxt->_eip;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_add(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_or(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_adc(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("adc", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("sbb", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_and(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("and", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_sub(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("sub", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_xor(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("xor", ctxt->src, ctxt->dst, ctxt->eflags);
return X86EMUL_CONTINUE;
}
static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
-static int em_imul(struct x86_emulate_ctxt *ctxt)
+static int em_test(struct x86_emulate_ctxt *ctxt)
+{
+ emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ /* Write back the register source. */
+ ctxt->src.val = ctxt->dst.val;
+ write_register_operand(&ctxt->src);
- emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
+ /* Write back the memory destination with implicit LOCK prefix. */
+ ctxt->dst.val = ctxt->src.orig_val;
+ ctxt->lock_prefix = 1;
return X86EMUL_CONTINUE;
}
-static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
+static int em_imul(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
+ emulate_2op_SrcV_nobyte("imul", ctxt->src, ctxt->dst, ctxt->eflags);
+ return X86EMUL_CONTINUE;
+}
- c->dst.val = c->src2.val;
+static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
+{
+ ctxt->dst.val = ctxt->src2.val;
return em_imul(ctxt);
}
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->dst.type = OP_REG;
- c->dst.bytes = c->src.bytes;
- c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
- c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.bytes = ctxt->src.bytes;
+ ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+ ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
return X86EMUL_CONTINUE;
}
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
u64 tsc = 0;
ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
- c->regs[VCPU_REGS_RAX] = (u32)tsc;
- c->regs[VCPU_REGS_RDX] = tsc >> 32;
+ ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
+ ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- c->dst.val = c->src.val;
+ ctxt->dst.val = ctxt->src.val;
return X86EMUL_CONTINUE;
}
+static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
+{
+ if (ctxt->modrm_reg > VCPU_SREG_GS)
+ return emulate_ud(ctxt);
+
+ ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
+ return X86EMUL_CONTINUE;
+}
+
+static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
+{
+ u16 sel = ctxt->src.val;
+
+ if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
+ return emulate_ud(ctxt);
+
+ if (ctxt->modrm_reg == VCPU_SREG_SS)
+ ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
+
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
+}
+
static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes);
+ memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
return X86EMUL_CONTINUE;
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
ulong linear;
- rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear);
+ rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
@@ -2714,10 +2661,9 @@ static int em_clts(struct x86_emulate_ctxt *ctxt)
static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
- if (c->modrm_mod != 3 || c->modrm_rm != 1)
+ if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
return X86EMUL_UNHANDLEABLE;
rc = ctxt->ops->fix_hypercall(ctxt);
@@ -2725,73 +2671,104 @@ static int em_vmcall(struct x86_emulate_ctxt *ctxt)
return rc;
/* Let the processor re-execute the fixed hypercall */
- c->eip = ctxt->eip;
+ ctxt->_eip = ctxt->eip;
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
struct desc_ptr desc_ptr;
int rc;
- rc = read_descriptor(ctxt, c->src.addr.mem,
+ rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
- c->op_bytes);
+ ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->set_gdt(ctxt, &desc_ptr);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
int rc;
rc = ctxt->ops->fix_hypercall(ctxt);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return rc;
}
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
struct desc_ptr desc_ptr;
int rc;
- rc = read_descriptor(ctxt, c->src.addr.mem,
+ rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
- c->op_bytes);
+ ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->dst.bytes = 2;
- c->dst.val = ctxt->ops->get_cr(ctxt, 0);
+ ctxt->dst.bytes = 2;
+ ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
- | (c->src.val & 0x0f));
- c->dst.type = OP_NONE;
+ | (ctxt->src.val & 0x0f));
+ ctxt->dst.type = OP_NONE;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_loop(struct x86_emulate_ctxt *ctxt)
+{
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
+ if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
+ (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+ jmp_rel(ctxt, ctxt->src.val);
+
+ return X86EMUL_CONTINUE;
+}
+
+static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+{
+ if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
+ jmp_rel(ctxt, ctxt->src.val);
+
+ return X86EMUL_CONTINUE;
+}
+
+static int em_cli(struct x86_emulate_ctxt *ctxt)
+{
+ if (emulator_bad_iopl(ctxt))
+ return emulate_gp(ctxt, 0);
+
+ ctxt->eflags &= ~X86_EFLAGS_IF;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_sti(struct x86_emulate_ctxt *ctxt)
+{
+ if (emulator_bad_iopl(ctxt))
+ return emulate_gp(ctxt, 0);
+
+ ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
+ ctxt->eflags |= X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
@@ -2809,9 +2786,7 @@ static bool valid_cr(int nr)
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- if (!valid_cr(c->modrm_reg))
+ if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
@@ -2819,9 +2794,8 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- u64 new_val = c->src.val64;
- int cr = c->modrm_reg;
+ u64 new_val = ctxt->src.val64;
+ int cr = ctxt->modrm_reg;
u64 efer = 0;
static u64 cr_reserved_bits[] = {
@@ -2898,8 +2872,7 @@ static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- int dr = c->modrm_reg;
+ int dr = ctxt->modrm_reg;
u64 cr4;
if (dr > 7)
@@ -2917,9 +2890,8 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
- u64 new_val = c->src.val64;
- int dr = c->modrm_reg;
+ u64 new_val = ctxt->src.val64;
+ int dr = ctxt->modrm_reg;
if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
return emulate_gp(ctxt, 0);
@@ -2941,7 +2913,7 @@ static int check_svme(struct x86_emulate_ctxt *ctxt)
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
- u64 rax = ctxt->decode.regs[VCPU_REGS_RAX];
+ u64 rax = ctxt->regs[VCPU_REGS_RAX];
/* Valid physical address? */
if (rax & 0xffff000000000000ULL)
@@ -2963,7 +2935,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
- u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX];
+ u64 rcx = ctxt->regs[VCPU_REGS_RCX];
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
(rcx > 3))
@@ -2974,10 +2946,8 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->dst.bytes = min(c->dst.bytes, 4u);
- if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes))
+ ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
+ if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
@@ -2985,10 +2955,8 @@ static int check_perm_in(struct x86_emulate_ctxt *ctxt)
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
- c->src.bytes = min(c->src.bytes, 4u);
- if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes))
+ ctxt->src.bytes = min(ctxt->src.bytes, 4u);
+ if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
@@ -3165,12 +3133,15 @@ static struct opcode opcode_table[256] = {
G(DstMem | SrcImm | ModRM | Group, group1),
G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
G(DstMem | SrcImmByte | ModRM | Group, group1),
- D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
+ I2bv(DstMem | SrcReg | ModRM, em_test),
+ I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
/* 0x88 - 0x8F */
I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
- D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
- D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
+ I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
+ D(ModRM | SrcMem | NoAccess | DstReg),
+ I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
+ G(0, group1A),
/* 0x90 - 0x97 */
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
@@ -3184,7 +3155,7 @@ static struct opcode opcode_table[256] = {
I2bv(SrcSI | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstDI | String, em_cmp),
/* 0xA8 - 0xAF */
- D2bv(DstAcc | SrcImm),
+ I2bv(DstAcc | SrcImm, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
I2bv(SrcAcc | DstDI | String, em_cmp),
@@ -3195,25 +3166,26 @@ static struct opcode opcode_table[256] = {
/* 0xC0 - 0xC7 */
D2bv(DstMem | SrcImmByte | ModRM),
I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
- D(ImplicitOps | Stack),
+ I(ImplicitOps | Stack, em_ret),
D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
- N, N, N, D(ImplicitOps | Stack),
+ N, N, N, I(ImplicitOps | Stack, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
- D(ImplicitOps | No64), DI(ImplicitOps, iret),
+ D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
- X4(D(SrcImmByte)),
+ X3(I(SrcImmByte, em_loop)),
+ I(SrcImmByte, em_jcxz),
D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in),
D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
/* 0xE8 - 0xEF */
D(SrcImm | Stack), D(SrcImm | ImplicitOps),
- D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
+ I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
D2bvIP(SrcDX | DstAcc, in, check_perm_in),
D2bvIP(SrcAcc | DstDX, out, check_perm_out),
/* 0xF0 - 0xF7 */
@@ -3221,14 +3193,16 @@ static struct opcode opcode_table[256] = {
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
G(ByteOp, group3), G(0, group3),
/* 0xF8 - 0xFF */
- D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
+ D(ImplicitOps), D(ImplicitOps),
+ I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
- N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N,
+ N, I(ImplicitOps | VendorSpecific, em_syscall),
+ II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM), N, N,
/* 0x10 - 0x1F */
@@ -3245,7 +3219,8 @@ static struct opcode twobyte_table[256] = {
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
DI(ImplicitOps | Priv, rdmsr),
DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
- D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
+ I(ImplicitOps | VendorSpecific, em_sysenter),
+ I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
@@ -3313,11 +3288,11 @@ static struct opcode twobyte_table[256] = {
#undef I2bv
#undef I6ALU
-static unsigned imm_size(struct decode_cache *c)
+static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
unsigned size;
- size = (c->d & ByteOp) ? 1 : c->op_bytes;
+ size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
if (size == 8)
size = 4;
return size;
@@ -3326,23 +3301,21 @@ static unsigned imm_size(struct decode_cache *c)
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned size, bool sign_extension)
{
- struct decode_cache *c = &ctxt->decode;
- struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
op->type = OP_IMM;
op->bytes = size;
- op->addr.mem.ea = c->eip;
+ op->addr.mem.ea = ctxt->_eip;
/* NB. Immediates are sign-extended as necessary. */
switch (op->bytes) {
case 1:
- op->val = insn_fetch(s8, 1, c->eip);
+ op->val = insn_fetch(s8, 1, ctxt->_eip);
break;
case 2:
- op->val = insn_fetch(s16, 2, c->eip);
+ op->val = insn_fetch(s16, 2, ctxt->_eip);
break;
case 4:
- op->val = insn_fetch(s32, 4, c->eip);
+ op->val = insn_fetch(s32, 4, ctxt->_eip);
break;
}
if (!sign_extension) {
@@ -3362,11 +3335,8 @@ done:
return rc;
}
-int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
- struct x86_emulate_ops *ops = ctxt->ops;
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
@@ -3374,11 +3344,11 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
struct opcode opcode;
struct operand memop = { .type = OP_NONE }, *memopp = NULL;
- c->eip = ctxt->eip;
- c->fetch.start = c->eip;
- c->fetch.end = c->fetch.start + insn_len;
+ ctxt->_eip = ctxt->eip;
+ ctxt->fetch.start = ctxt->_eip;
+ ctxt->fetch.end = ctxt->fetch.start + insn_len;
if (insn_len > 0)
- memcpy(c->fetch.data, insn, insn_len);
+ memcpy(ctxt->fetch.data, insn, insn_len);
switch (mode) {
case X86EMUL_MODE_REAL:
@@ -3399,46 +3369,46 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
return -1;
}
- c->op_bytes = def_op_bytes;
- c->ad_bytes = def_ad_bytes;
+ ctxt->op_bytes = def_op_bytes;
+ ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
- switch (c->b = insn_fetch(u8, 1, c->eip)) {
+ switch (ctxt->b = insn_fetch(u8, 1, ctxt->_eip)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
- c->op_bytes = def_op_bytes ^ 6;
+ ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
- c->ad_bytes = def_ad_bytes ^ 12;
+ ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
- c->ad_bytes = def_ad_bytes ^ 6;
+ ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
- set_seg_override(c, (c->b >> 3) & 3);
+ set_seg_override(ctxt, (ctxt->b >> 3) & 3);
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
- set_seg_override(c, c->b & 7);
+ set_seg_override(ctxt, ctxt->b & 7);
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
- c->rex_prefix = c->b;
+ ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
- c->lock_prefix = 1;
+ ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
- c->rep_prefix = c->b;
+ ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
@@ -3446,50 +3416,50 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
/* Any legacy prefix after a REX prefix nullifies its effect. */
- c->rex_prefix = 0;
+ ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
- if (c->rex_prefix & 8)
- c->op_bytes = 8; /* REX.W */
+ if (ctxt->rex_prefix & 8)
+ ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
- opcode = opcode_table[c->b];
+ opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
- if (c->b == 0x0f) {
- c->twobyte = 1;
- c->b = insn_fetch(u8, 1, c->eip);
- opcode = twobyte_table[c->b];
+ if (ctxt->b == 0x0f) {
+ ctxt->twobyte = 1;
+ ctxt->b = insn_fetch(u8, 1, ctxt->_eip);
+ opcode = twobyte_table[ctxt->b];
}
- c->d = opcode.flags;
+ ctxt->d = opcode.flags;
- while (c->d & GroupMask) {
- switch (c->d & GroupMask) {
+ while (ctxt->d & GroupMask) {
+ switch (ctxt->d & GroupMask) {
case Group:
- c->modrm = insn_fetch(u8, 1, c->eip);
- --c->eip;
- goffset = (c->modrm >> 3) & 7;
+ ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+ --ctxt->_eip;
+ goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
- c->modrm = insn_fetch(u8, 1, c->eip);
- --c->eip;
- goffset = (c->modrm >> 3) & 7;
- if ((c->modrm >> 6) == 3)
+ ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
+ --ctxt->_eip;
+ goffset = (ctxt->modrm >> 3) & 7;
+ if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
- goffset = c->modrm & 7;
+ goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
- if (c->rep_prefix && op_prefix)
+ if (ctxt->rep_prefix && op_prefix)
return X86EMUL_UNHANDLEABLE;
- simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
+ simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
@@ -3501,61 +3471,61 @@ done_prefixes:
return X86EMUL_UNHANDLEABLE;
}
- c->d &= ~GroupMask;
- c->d |= opcode.flags;
+ ctxt->d &= ~GroupMask;
+ ctxt->d |= opcode.flags;
}
- c->execute = opcode.u.execute;
- c->check_perm = opcode.check_perm;
- c->intercept = opcode.intercept;
+ ctxt->execute = opcode.u.execute;
+ ctxt->check_perm = opcode.check_perm;
+ ctxt->intercept = opcode.intercept;
/* Unrecognised? */
- if (c->d == 0 || (c->d & Undefined))
+ if (ctxt->d == 0 || (ctxt->d & Undefined))
return -1;
- if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
+ if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
return -1;
- if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
- c->op_bytes = 8;
+ if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
+ ctxt->op_bytes = 8;
- if (c->d & Op3264) {
+ if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
- c->op_bytes = 8;
+ ctxt->op_bytes = 8;
else
- c->op_bytes = 4;
+ ctxt->op_bytes = 4;
}
- if (c->d & Sse)
- c->op_bytes = 16;
+ if (ctxt->d & Sse)
+ ctxt->op_bytes = 16;
/* ModRM and SIB bytes. */
- if (c->d & ModRM) {
- rc = decode_modrm(ctxt, ops, &memop);
- if (!c->has_seg_override)
- set_seg_override(c, c->modrm_seg);
- } else if (c->d & MemAbs)
- rc = decode_abs(ctxt, ops, &memop);
+ if (ctxt->d & ModRM) {
+ rc = decode_modrm(ctxt, &memop);
+ if (!ctxt->has_seg_override)
+ set_seg_override(ctxt, ctxt->modrm_seg);
+ } else if (ctxt->d & MemAbs)
+ rc = decode_abs(ctxt, &memop);
if (rc != X86EMUL_CONTINUE)
goto done;
- if (!c->has_seg_override)
- set_seg_override(c, VCPU_SREG_DS);
+ if (!ctxt->has_seg_override)
+ set_seg_override(ctxt, VCPU_SREG_DS);
- memop.addr.mem.seg = seg_override(ctxt, c);
+ memop.addr.mem.seg = seg_override(ctxt);
- if (memop.type == OP_MEM && c->ad_bytes != 8)
+ if (memop.type == OP_MEM && ctxt->ad_bytes != 8)
memop.addr.mem.ea = (u32)memop.addr.mem.ea;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
- switch (c->d & SrcMask) {
+ switch (ctxt->d & SrcMask) {
case SrcNone:
break;
case SrcReg:
- decode_register_operand(ctxt, &c->src, c, 0);
+ decode_register_operand(ctxt, &ctxt->src, 0);
break;
case SrcMem16:
memop.bytes = 2;
@@ -3564,60 +3534,60 @@ done_prefixes:
memop.bytes = 4;
goto srcmem_common;
case SrcMem:
- memop.bytes = (c->d & ByteOp) ? 1 :
- c->op_bytes;
+ memop.bytes = (ctxt->d & ByteOp) ? 1 :
+ ctxt->op_bytes;
srcmem_common:
- c->src = memop;
- memopp = &c->src;
+ ctxt->src = memop;
+ memopp = &ctxt->src;
break;
case SrcImmU16:
- rc = decode_imm(ctxt, &c->src, 2, false);
+ rc = decode_imm(ctxt, &ctxt->src, 2, false);
break;
case SrcImm:
- rc = decode_imm(ctxt, &c->src, imm_size(c), true);
+ rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), true);
break;
case SrcImmU:
- rc = decode_imm(ctxt, &c->src, imm_size(c), false);
+ rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), false);
break;
case SrcImmByte:
- rc = decode_imm(ctxt, &c->src, 1, true);
+ rc = decode_imm(ctxt, &ctxt->src, 1, true);
break;
case SrcImmUByte:
- rc = decode_imm(ctxt, &c->src, 1, false);
+ rc = decode_imm(ctxt, &ctxt->src, 1, false);
break;
case SrcAcc:
- c->src.type = OP_REG;
- c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
- fetch_register_operand(&c->src);
+ ctxt->src.type = OP_REG;
+ ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
+ fetch_register_operand(&ctxt->src);
break;
case SrcOne:
- c->src.bytes = 1;
- c->src.val = 1;
+ ctxt->src.bytes = 1;
+ ctxt->src.val = 1;
break;
case SrcSI:
- c->src.type = OP_MEM;
- c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->src.addr.mem.ea =
- register_address(c, c->regs[VCPU_REGS_RSI]);
- c->src.addr.mem.seg = seg_override(ctxt, c);
- c->src.val = 0;
+ ctxt->src.type = OP_MEM;
+ ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ ctxt->src.addr.mem.ea =
+ register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
+ ctxt->src.addr.mem.seg = seg_override(ctxt);
+ ctxt->src.val = 0;
break;
case SrcImmFAddr:
- c->src.type = OP_IMM;
- c->src.addr.mem.ea = c->eip;
- c->src.bytes = c->op_bytes + 2;
- insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
+ ctxt->src.type = OP_IMM;
+ ctxt->src.addr.mem.ea = ctxt->_eip;
+ ctxt->src.bytes = ctxt->op_bytes + 2;
+ insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt->_eip);
break;
case SrcMemFAddr:
- memop.bytes = c->op_bytes + 2;
+ memop.bytes = ctxt->op_bytes + 2;
goto srcmem_common;
break;
case SrcDX:
- c->src.type = OP_REG;
- c->src.bytes = 2;
- c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
- fetch_register_operand(&c->src);
+ ctxt->src.type = OP_REG;
+ ctxt->src.bytes = 2;
+ ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+ fetch_register_operand(&ctxt->src);
break;
}
@@ -3628,22 +3598,22 @@ done_prefixes:
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
- switch (c->d & Src2Mask) {
+ switch (ctxt->d & Src2Mask) {
case Src2None:
break;
case Src2CL:
- c->src2.bytes = 1;
- c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
+ ctxt->src2.bytes = 1;
+ ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8;
break;
case Src2ImmByte:
- rc = decode_imm(ctxt, &c->src2, 1, true);
+ rc = decode_imm(ctxt, &ctxt->src2, 1, true);
break;
case Src2One:
- c->src2.bytes = 1;
- c->src2.val = 1;
+ ctxt->src2.bytes = 1;
+ ctxt->src2.val = 1;
break;
case Src2Imm:
- rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
+ rc = decode_imm(ctxt, &ctxt->src2, imm_size(ctxt), true);
break;
}
@@ -3651,68 +3621,66 @@ done_prefixes:
goto done;
/* Decode and fetch the destination operand: register or memory. */
- switch (c->d & DstMask) {
+ switch (ctxt->d & DstMask) {
case DstReg:
- decode_register_operand(ctxt, &c->dst, c,
- c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
+ decode_register_operand(ctxt, &ctxt->dst,
+ ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
break;
case DstImmUByte:
- c->dst.type = OP_IMM;
- c->dst.addr.mem.ea = c->eip;
- c->dst.bytes = 1;
- c->dst.val = insn_fetch(u8, 1, c->eip);
+ ctxt->dst.type = OP_IMM;
+ ctxt->dst.addr.mem.ea = ctxt->_eip;
+ ctxt->dst.bytes = 1;
+ ctxt->dst.val = insn_fetch(u8, 1, ctxt->_eip);
break;
case DstMem:
case DstMem64:
- c->dst = memop;
- memopp = &c->dst;
- if ((c->d & DstMask) == DstMem64)
- c->dst.bytes = 8;
+ ctxt->dst = memop;
+ memopp = &ctxt->dst;
+ if ((ctxt->d & DstMask) == DstMem64)
+ ctxt->dst.bytes = 8;
else
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- if (c->d & BitOp)
- fetch_bit_operand(c);
- c->dst.orig_val = c->dst.val;
+ ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ if (ctxt->d & BitOp)
+ fetch_bit_operand(ctxt);
+ ctxt->dst.orig_val = ctxt->dst.val;
break;
case DstAcc:
- c->dst.type = OP_REG;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
- fetch_register_operand(&c->dst);
- c->dst.orig_val = c->dst.val;
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
+ fetch_register_operand(&ctxt->dst);
+ ctxt->dst.orig_val = ctxt->dst.val;
break;
case DstDI:
- c->dst.type = OP_MEM;
- c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
- c->dst.addr.mem.ea =
- register_address(c, c->regs[VCPU_REGS_RDI]);
- c->dst.addr.mem.seg = VCPU_SREG_ES;
- c->dst.val = 0;
+ ctxt->dst.type = OP_MEM;
+ ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+ ctxt->dst.addr.mem.ea =
+ register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
+ ctxt->dst.addr.mem.seg = VCPU_SREG_ES;
+ ctxt->dst.val = 0;
break;
case DstDX:
- c->dst.type = OP_REG;
- c->dst.bytes = 2;
- c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
- fetch_register_operand(&c->dst);
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.bytes = 2;
+ ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+ fetch_register_operand(&ctxt->dst);
break;
case ImplicitOps:
/* Special instructions do their own operand decoding. */
default:
- c->dst.type = OP_NONE; /* Disable writeback. */
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
}
done:
- if (memopp && memopp->type == OP_MEM && c->rip_relative)
- memopp->addr.mem.ea += c->eip;
+ if (memopp && memopp->type == OP_MEM && ctxt->rip_relative)
+ memopp->addr.mem.ea += ctxt->_eip;
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
- struct decode_cache *c = &ctxt->decode;
-
/* The second termination condition only applies for REPE
* and REPNE. Test if the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
@@ -3720,304 +3688,232 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
- if (((c->b == 0xa6) || (c->b == 0xa7) ||
- (c->b == 0xae) || (c->b == 0xaf))
- && (((c->rep_prefix == REPE_PREFIX) &&
+ if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
+ (ctxt->b == 0xae) || (ctxt->b == 0xaf))
+ && (((ctxt->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == 0))
- || ((c->rep_prefix == REPNE_PREFIX) &&
+ || ((ctxt->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
return true;
return false;
}
-int
-x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
struct x86_emulate_ops *ops = ctxt->ops;
u64 msr_data;
- struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
- int saved_dst_type = c->dst.type;
- int irq; /* Used for int 3, int, and into */
+ int saved_dst_type = ctxt->dst.type;
- ctxt->decode.mem_read.pos = 0;
+ ctxt->mem_read.pos = 0;
- if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
+ if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
rc = emulate_ud(ctxt);
goto done;
}
/* LOCK prefix is allowed only with some instructions */
- if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
+ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
+ if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((c->d & Sse)
+ if ((ctxt->d & Sse)
&& ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+ if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
- if (unlikely(ctxt->guest_mode) && c->intercept) {
- rc = emulator_check_intercept(ctxt, c->intercept,
+ if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
+ rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
- if ((c->d & Priv) && ops->cpl(ctxt)) {
+ if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
rc = emulate_gp(ctxt, 0);
goto done;
}
/* Instruction can only be executed in protected mode */
- if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
+ if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
rc = emulate_ud(ctxt);
goto done;
}
/* Do instruction specific permission checks */
- if (c->check_perm) {
- rc = c->check_perm(ctxt);
+ if (ctxt->check_perm) {
+ rc = ctxt->check_perm(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- if (unlikely(ctxt->guest_mode) && c->intercept) {
- rc = emulator_check_intercept(ctxt, c->intercept,
+ if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
+ rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- if (c->rep_prefix && (c->d & String)) {
+ if (ctxt->rep_prefix && (ctxt->d & String)) {
/* All REP prefixes have the same first termination condition */
- if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
- ctxt->eip = c->eip;
+ if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
+ ctxt->eip = ctxt->_eip;
goto done;
}
}
- if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
- rc = segmented_read(ctxt, c->src.addr.mem,
- c->src.valptr, c->src.bytes);
+ if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
+ rc = segmented_read(ctxt, ctxt->src.addr.mem,
+ ctxt->src.valptr, ctxt->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
- c->src.orig_val64 = c->src.val64;
+ ctxt->src.orig_val64 = ctxt->src.val64;
}
- if (c->src2.type == OP_MEM) {
- rc = segmented_read(ctxt, c->src2.addr.mem,
- &c->src2.val, c->src2.bytes);
+ if (ctxt->src2.type == OP_MEM) {
+ rc = segmented_read(ctxt, ctxt->src2.addr.mem,
+ &ctxt->src2.val, ctxt->src2.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- if ((c->d & DstMask) == ImplicitOps)
+ if ((ctxt->d & DstMask) == ImplicitOps)
goto special_insn;
- if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
+ if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */
- rc = segmented_read(ctxt, c->dst.addr.mem,
- &c->dst.val, c->dst.bytes);
+ rc = segmented_read(ctxt, ctxt->dst.addr.mem,
+ &ctxt->dst.val, ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- c->dst.orig_val = c->dst.val;
+ ctxt->dst.orig_val = ctxt->dst.val;
special_insn:
- if (unlikely(ctxt->guest_mode) && c->intercept) {
- rc = emulator_check_intercept(ctxt, c->intercept,
+ if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
+ rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
goto done;
}
- if (c->execute) {
- rc = c->execute(ctxt);
+ if (ctxt->execute) {
+ rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
- if (c->twobyte)
+ if (ctxt->twobyte)
goto twobyte_insn;
- switch (c->b) {
+ switch (ctxt->b) {
case 0x06: /* push es */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
break;
case 0x07: /* pop es */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
break;
case 0x0e: /* push cs */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
break;
case 0x16: /* push ss */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
break;
case 0x17: /* pop ss */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
break;
case 0x1e: /* push ds */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
break;
case 0x1f: /* pop ds */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
break;
case 0x40 ... 0x47: /* inc r16/r32 */
- emulate_1op("inc", c->dst, ctxt->eflags);
+ emulate_1op("inc", ctxt->dst, ctxt->eflags);
break;
case 0x48 ... 0x4f: /* dec r16/r32 */
- emulate_1op("dec", c->dst, ctxt->eflags);
+ emulate_1op("dec", ctxt->dst, ctxt->eflags);
break;
case 0x63: /* movsxd */
if (ctxt->mode != X86EMUL_MODE_PROT64)
goto cannot_emulate;
- c->dst.val = (s32) c->src.val;
+ ctxt->dst.val = (s32) ctxt->src.val;
break;
case 0x6c: /* insb */
case 0x6d: /* insw/insd */
- c->src.val = c->regs[VCPU_REGS_RDX];
+ ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
goto do_io_in;
case 0x6e: /* outsb */
case 0x6f: /* outsw/outsd */
- c->dst.val = c->regs[VCPU_REGS_RDX];
+ ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
goto do_io_out;
break;
case 0x70 ... 0x7f: /* jcc (short) */
- if (test_cc(c->b, ctxt->eflags))
- jmp_rel(c, c->src.val);
- break;
- case 0x84 ... 0x85:
- test:
- emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
- break;
- case 0x86 ... 0x87: /* xchg */
- xchg:
- /* Write back the register source. */
- c->src.val = c->dst.val;
- write_register_operand(&c->src);
- /*
- * Write back the memory destination with implicit LOCK
- * prefix.
- */
- c->dst.val = c->src.orig_val;
- c->lock_prefix = 1;
- break;
- case 0x8c: /* mov r/m, sreg */
- if (c->modrm_reg > VCPU_SREG_GS) {
- rc = emulate_ud(ctxt);
- goto done;
- }
- c->dst.val = get_segment_selector(ctxt, c->modrm_reg);
+ if (test_cc(ctxt->b, ctxt->eflags))
+ jmp_rel(ctxt, ctxt->src.val);
break;
case 0x8d: /* lea r16/r32, m */
- c->dst.val = c->src.addr.mem.ea;
+ ctxt->dst.val = ctxt->src.addr.mem.ea;
break;
- case 0x8e: { /* mov seg, r/m16 */
- uint16_t sel;
-
- sel = c->src.val;
-
- if (c->modrm_reg == VCPU_SREG_CS ||
- c->modrm_reg > VCPU_SREG_GS) {
- rc = emulate_ud(ctxt);
- goto done;
- }
-
- if (c->modrm_reg == VCPU_SREG_SS)
- ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
-
- rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
-
- c->dst.type = OP_NONE; /* Disable writeback. */
- break;
- }
case 0x8f: /* pop (sole member of Grp1a) */
rc = em_grp1a(ctxt);
break;
case 0x90 ... 0x97: /* nop / xchg reg, rax */
- if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
+ if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
break;
- goto xchg;
+ rc = em_xchg(ctxt);
+ break;
case 0x98: /* cbw/cwde/cdqe */
- switch (c->op_bytes) {
- case 2: c->dst.val = (s8)c->dst.val; break;
- case 4: c->dst.val = (s16)c->dst.val; break;
- case 8: c->dst.val = (s32)c->dst.val; break;
+ switch (ctxt->op_bytes) {
+ case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
+ case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
+ case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
}
break;
- case 0xa8 ... 0xa9: /* test ax, imm */
- goto test;
case 0xc0 ... 0xc1:
rc = em_grp2(ctxt);
break;
- case 0xc3: /* ret */
- c->dst.type = OP_REG;
- c->dst.addr.reg = &c->eip;
- c->dst.bytes = c->op_bytes;
- rc = em_pop(ctxt);
- break;
case 0xc4: /* les */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
break;
case 0xc5: /* lds */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
- break;
- case 0xcb: /* ret far */
- rc = emulate_ret_far(ctxt, ops);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
break;
case 0xcc: /* int3 */
- irq = 3;
- goto do_interrupt;
+ rc = emulate_int(ctxt, 3);
+ break;
case 0xcd: /* int n */
- irq = c->src.val;
- do_interrupt:
- rc = emulate_int(ctxt, ops, irq);
+ rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
- if (ctxt->eflags & EFLG_OF) {
- irq = 4;
- goto do_interrupt;
- }
- break;
- case 0xcf: /* iret */
- rc = emulate_iret(ctxt, ops);
+ if (ctxt->eflags & EFLG_OF)
+ rc = emulate_int(ctxt, 4);
break;
case 0xd0 ... 0xd1: /* Grp2 */
rc = em_grp2(ctxt);
break;
case 0xd2 ... 0xd3: /* Grp2 */
- c->src.val = c->regs[VCPU_REGS_RCX];
+ ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
rc = em_grp2(ctxt);
break;
- case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
- register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
- if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
- (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
- jmp_rel(c, c->src.val);
- break;
- case 0xe3: /* jcxz/jecxz/jrcxz */
- if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
- jmp_rel(c, c->src.val);
- break;
case 0xe4: /* inb */
case 0xe5: /* in */
goto do_io_in;
@@ -4025,35 +3921,30 @@ special_insn:
case 0xe7: /* out */
goto do_io_out;
case 0xe8: /* call (near) */ {
- long int rel = c->src.val;
- c->src.val = (unsigned long) c->eip;
- jmp_rel(c, rel);
+ long int rel = ctxt->src.val;
+ ctxt->src.val = (unsigned long) ctxt->_eip;
+ jmp_rel(ctxt, rel);
rc = em_push(ctxt);
break;
}
case 0xe9: /* jmp rel */
- goto jmp;
- case 0xea: /* jmp far */
- rc = em_jmp_far(ctxt);
- break;
- case 0xeb:
- jmp: /* jmp rel short */
- jmp_rel(c, c->src.val);
- c->dst.type = OP_NONE; /* Disable writeback. */
+ case 0xeb: /* jmp rel short */
+ jmp_rel(ctxt, ctxt->src.val);
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xec: /* in al,dx */
case 0xed: /* in (e/r)ax,dx */
do_io_in:
- if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
- &c->dst.val))
+ if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
+ &ctxt->dst.val))
goto done; /* IO is needed */
break;
case 0xee: /* out dx,al */
case 0xef: /* out dx,(e/r)ax */
do_io_out:
- ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
- &c->src.val, 1);
- c->dst.type = OP_NONE; /* Disable writeback. */
+ ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
+ &ctxt->src.val, 1);
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->ops->halt(ctxt);
@@ -4071,22 +3962,6 @@ special_insn:
case 0xf9: /* stc */
ctxt->eflags |= EFLG_CF;
break;
- case 0xfa: /* cli */
- if (emulator_bad_iopl(ctxt, ops)) {
- rc = emulate_gp(ctxt, 0);
- goto done;
- } else
- ctxt->eflags &= ~X86_EFLAGS_IF;
- break;
- case 0xfb: /* sti */
- if (emulator_bad_iopl(ctxt, ops)) {
- rc = emulate_gp(ctxt, 0);
- goto done;
- } else {
- ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
- ctxt->eflags |= X86_EFLAGS_IF;
- }
- break;
case 0xfc: /* cld */
ctxt->eflags &= ~EFLG_DF;
break;
@@ -4115,40 +3990,40 @@ writeback:
* restore dst type in case the decoding will be reused
* (happens for string instruction )
*/
- c->dst.type = saved_dst_type;
+ ctxt->dst.type = saved_dst_type;
- if ((c->d & SrcMask) == SrcSI)
- string_addr_inc(ctxt, seg_override(ctxt, c),
- VCPU_REGS_RSI, &c->src);
+ if ((ctxt->d & SrcMask) == SrcSI)
+ string_addr_inc(ctxt, seg_override(ctxt),
+ VCPU_REGS_RSI, &ctxt->src);
- if ((c->d & DstMask) == DstDI)
+ if ((ctxt->d & DstMask) == DstDI)
string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
- &c->dst);
+ &ctxt->dst);
- if (c->rep_prefix && (c->d & String)) {
- struct read_cache *r = &ctxt->decode.io_read;
- register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
+ if (ctxt->rep_prefix && (ctxt->d & String)) {
+ struct read_cache *r = &ctxt->io_read;
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
if (!string_insn_completed(ctxt)) {
/*
* Re-enter guest when pio read ahead buffer is empty
* or, if it is not used, after each 1024 iteration.
*/
- if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
+ if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
(r->end == 0 || r->end != r->pos)) {
/*
* Reset read cache. Usually happens before
* decode, but since instruction is restarted
* we have to do it here.
*/
- ctxt->decode.mem_read.end = 0;
+ ctxt->mem_read.end = 0;
return EMULATION_RESTART;
}
goto done; /* skip rip writeback */
}
}
- ctxt->eip = c->eip;
+ ctxt->eip = ctxt->_eip;
done:
if (rc == X86EMUL_PROPAGATE_FAULT)
@@ -4159,13 +4034,7 @@ done:
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
- switch (c->b) {
- case 0x05: /* syscall */
- rc = emulate_syscall(ctxt, ops);
- break;
- case 0x06:
- rc = em_clts(ctxt);
- break;
+ switch (ctxt->b) {
case 0x09: /* wbinvd */
(ctxt->ops->wbinvd)(ctxt);
break;
@@ -4174,21 +4043,21 @@ twobyte_insn:
case 0x18: /* Grp16 (prefetch/nop) */
break;
case 0x20: /* mov cr, reg */
- c->dst.val = ops->get_cr(ctxt, c->modrm_reg);
+ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
break;
case 0x21: /* mov from dr to reg */
- ops->get_dr(ctxt, c->modrm_reg, &c->dst.val);
+ ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
break;
case 0x22: /* mov reg, cr */
- if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) {
+ if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done;
}
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
break;
case 0x23: /* mov from reg to dr */
- if (ops->set_dr(ctxt, c->modrm_reg, c->src.val &
+ if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
((ctxt->mode == X86EMUL_MODE_PROT64) ?
~0ULL : ~0U)) < 0) {
/* #UD condition is already handled by the code above */
@@ -4197,13 +4066,13 @@ twobyte_insn:
goto done;
}
- c->dst.type = OP_NONE; /* no writeback */
+ ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x30:
/* wrmsr */
- msr_data = (u32)c->regs[VCPU_REGS_RAX]
- | ((u64)c->regs[VCPU_REGS_RDX] << 32);
- if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) {
+ msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
+ | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
+ if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done;
@@ -4212,64 +4081,58 @@ twobyte_insn:
break;
case 0x32:
/* rdmsr */
- if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) {
+ if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
emulate_gp(ctxt, 0);
rc = X86EMUL_PROPAGATE_FAULT;
goto done;
} else {
- c->regs[VCPU_REGS_RAX] = (u32)msr_data;
- c->regs[VCPU_REGS_RDX] = msr_data >> 32;
+ ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
+ ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
}
rc = X86EMUL_CONTINUE;
break;
- case 0x34: /* sysenter */
- rc = emulate_sysenter(ctxt, ops);
- break;
- case 0x35: /* sysexit */
- rc = emulate_sysexit(ctxt, ops);
- break;
case 0x40 ... 0x4f: /* cmov */
- c->dst.val = c->dst.orig_val = c->src.val;
- if (!test_cc(c->b, ctxt->eflags))
- c->dst.type = OP_NONE; /* no writeback */
+ ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
+ if (!test_cc(ctxt->b, ctxt->eflags))
+ ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc*/
- if (test_cc(c->b, ctxt->eflags))
- jmp_rel(c, c->src.val);
+ if (test_cc(ctxt->b, ctxt->eflags))
+ jmp_rel(ctxt, ctxt->src.val);
break;
case 0x90 ... 0x9f: /* setcc r/m8 */
- c->dst.val = test_cc(c->b, ctxt->eflags);
+ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
case 0xa0: /* push fs */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
break;
case 0xa1: /* pop fs */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
break;
case 0xa3:
bt: /* bt */
- c->dst.type = OP_NONE;
+ ctxt->dst.type = OP_NONE;
/* only subword offset */
- c->src.val &= (c->dst.bytes << 3) - 1;
- emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
+ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
+ emulate_2op_SrcV_nobyte("bt", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xa4: /* shld imm8, r, r/m */
case 0xa5: /* shld cl, r, r/m */
- emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
+ emulate_2op_cl("shld", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xa8: /* push gs */
- rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
+ rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
break;
case 0xa9: /* pop gs */
- rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
+ rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
break;
case 0xab:
bts: /* bts */
- emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV_nobyte("bts", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xac: /* shrd imm8, r, r/m */
case 0xad: /* shrd cl, r, r/m */
- emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
+ emulate_2op_cl("shrd", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xae: /* clflush */
break;
@@ -4278,38 +4141,38 @@ twobyte_insn:
* Save real source value, then compare EAX against
* destination.
*/
- c->src.orig_val = c->src.val;
- c->src.val = c->regs[VCPU_REGS_RAX];
- emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
+ ctxt->src.orig_val = ctxt->src.val;
+ ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
+ emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
if (ctxt->eflags & EFLG_ZF) {
/* Success: write back to memory. */
- c->dst.val = c->src.orig_val;
+ ctxt->dst.val = ctxt->src.orig_val;
} else {
/* Failure: write the value we saw to EAX. */
- c->dst.type = OP_REG;
- c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
+ ctxt->dst.type = OP_REG;
+ ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
}
break;
case 0xb2: /* lss */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
break;
case 0xb3:
btr: /* btr */
- emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV_nobyte("btr", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xb4: /* lfs */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
break;
case 0xb5: /* lgs */
- rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
+ rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
break;
case 0xb6 ... 0xb7: /* movzx */
- c->dst.bytes = c->op_bytes;
- c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
- : (u16) c->src.val;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
+ : (u16) ctxt->src.val;
break;
case 0xba: /* Grp8 */
- switch (c->modrm_reg & 3) {
+ switch (ctxt->modrm_reg & 3) {
case 0:
goto bt;
case 1:
@@ -4322,47 +4185,47 @@ twobyte_insn:
break;
case 0xbb:
btc: /* btc */
- emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV_nobyte("btc", ctxt->src, ctxt->dst, ctxt->eflags);
break;
case 0xbc: { /* bsf */
u8 zf;
__asm__ ("bsf %2, %0; setz %1"
- : "=r"(c->dst.val), "=q"(zf)
- : "r"(c->src.val));
+ : "=r"(ctxt->dst.val), "=q"(zf)
+ : "r"(ctxt->src.val));
ctxt->eflags &= ~X86_EFLAGS_ZF;
if (zf) {
ctxt->eflags |= X86_EFLAGS_ZF;
- c->dst.type = OP_NONE; /* Disable writeback. */
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
}
break;
}
case 0xbd: { /* bsr */
u8 zf;
__asm__ ("bsr %2, %0; setz %1"
- : "=r"(c->dst.val), "=q"(zf)
- : "r"(c->src.val));
+ : "=r"(ctxt->dst.val), "=q"(zf)
+ : "r"(ctxt->src.val));
ctxt->eflags &= ~X86_EFLAGS_ZF;
if (zf) {
ctxt->eflags |= X86_EFLAGS_ZF;
- c->dst.type = OP_NONE; /* Disable writeback. */
+ ctxt->dst.type = OP_NONE; /* Disable writeback. */
}
break;
}
case 0xbe ... 0xbf: /* movsx */
- c->dst.bytes = c->op_bytes;
- c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
- (s16) c->src.val;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
+ (s16) ctxt->src.val;
break;
case 0xc0 ... 0xc1: /* xadd */
- emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
+ emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
/* Write back the register source. */
- c->src.val = c->dst.orig_val;
- write_register_operand(&c->src);
+ ctxt->src.val = ctxt->dst.orig_val;
+ write_register_operand(&ctxt->src);
break;
case 0xc3: /* movnti */
- c->dst.bytes = c->op_bytes;
- c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
- (u64) c->src.val;
+ ctxt->dst.bytes = ctxt->op_bytes;
+ ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
+ (u64) ctxt->src.val;
break;
case 0xc7: /* Grp9 (cmpxchg8b) */
rc = em_grp9(ctxt);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aee38623b768..9335e1bf72ad 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -148,7 +148,7 @@ module_param(oos_shadow, bool, 0644);
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
| PT64_NX_MASK)
-#define RMAP_EXT 4
+#define PTE_LIST_EXT 4
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
@@ -164,16 +164,16 @@ module_param(oos_shadow, bool, 0644);
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
-struct kvm_rmap_desc {
- u64 *sptes[RMAP_EXT];
- struct kvm_rmap_desc *more;
+struct pte_list_desc {
+ u64 *sptes[PTE_LIST_EXT];
+ struct pte_list_desc *more;
};
struct kvm_shadow_walk_iterator {
u64 addr;
hpa_t shadow_addr;
- int level;
u64 *sptep;
+ int level;
unsigned index;
};
@@ -182,32 +182,68 @@ struct kvm_shadow_walk_iterator {
shadow_walk_okay(&(_walker)); \
shadow_walk_next(&(_walker)))
-typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
+#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
+ for (shadow_walk_init(&(_walker), _vcpu, _addr); \
+ shadow_walk_okay(&(_walker)) && \
+ ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
+ __shadow_walk_next(&(_walker), spte))
-static struct kmem_cache *pte_chain_cache;
-static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;
-static u64 __read_mostly shadow_trap_nonpresent_pte;
-static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
+static u64 __read_mostly shadow_mmio_mask;
-static inline u64 rsvd_bits(int s, int e)
+static void mmu_spte_set(u64 *sptep, u64 spte);
+
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
{
- return ((1ULL << (e - s + 1)) - 1) << s;
+ shadow_mmio_mask = mmio_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+
+static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
+{
+ access &= ACC_WRITE_MASK | ACC_USER_MASK;
+
+ trace_mark_mmio_spte(sptep, gfn, access);
+ mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
}
-void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
+static bool is_mmio_spte(u64 spte)
{
- shadow_trap_nonpresent_pte = trap_pte;
- shadow_notrap_nonpresent_pte = notrap_pte;
+ return (spte & shadow_mmio_mask) == shadow_mmio_mask;
+}
+
+static gfn_t get_mmio_spte_gfn(u64 spte)
+{
+ return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT;
+}
+
+static unsigned get_mmio_spte_access(u64 spte)
+{
+ return (spte & ~shadow_mmio_mask) & ~PAGE_MASK;
+}
+
+static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access)
+{
+ if (unlikely(is_noslot_pfn(pfn))) {
+ mark_mmio_spte(sptep, gfn, access);
+ return true;
+ }
+
+ return false;
+}
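The helpers added above pack an MMIO marker into a not-present spte: a dedicated mask flags the entry, the gfn lands in the page-frame field and the two relevant access bits sit in the low bits. A self-contained sketch of the same encoding follows; it is only an illustration (the mask value and the ACC_* constants are hard-coded here, whereas KVM receives the real mask through kvm_mmu_set_mmio_spte_mask()):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~(((uint64_t)1 << PAGE_SHIFT) - 1))
#define ACC_WRITE_MASK	(1ULL << 1)	/* mirrors PT_WRITABLE_MASK */
#define ACC_USER_MASK	(1ULL << 2)	/* mirrors PT_USER_MASK */

/* Illustrative value only; the real mask is chosen so the spte faults. */
static const uint64_t mmio_mask = 0xffull << 49;

static uint64_t make_mmio_spte(uint64_t gfn, unsigned access)
{
	access &= ACC_WRITE_MASK | ACC_USER_MASK;
	return mmio_mask | access | (gfn << PAGE_SHIFT);
}

static int spte_is_mmio(uint64_t spte)
{
	return (spte & mmio_mask) == mmio_mask;
}

static uint64_t mmio_spte_gfn(uint64_t spte)
{
	return (spte & ~mmio_mask) >> PAGE_SHIFT;
}

static unsigned mmio_spte_access(uint64_t spte)
{
	return (spte & ~mmio_mask) & ~PAGE_MASK;
}

int main(void)
{
	uint64_t spte = make_mmio_spte(0x1234, ACC_WRITE_MASK | ACC_USER_MASK);

	printf("mmio=%d gfn=%#llx access=%#x\n", spte_is_mmio(spte),
	       (unsigned long long)mmio_spte_gfn(spte), mmio_spte_access(spte));
	return 0;
}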
+
+static inline u64 rsvd_bits(int s, int e)
+{
+ return ((1ULL << (e - s + 1)) - 1) << s;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask)
@@ -220,11 +256,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
-static bool is_write_protection(struct kvm_vcpu *vcpu)
-{
- return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
static int is_cpuid_PSE36(void)
{
return 1;
@@ -237,8 +268,7 @@ static int is_nx(struct kvm_vcpu *vcpu)
static int is_shadow_present_pte(u64 pte)
{
- return pte != shadow_trap_nonpresent_pte
- && pte != shadow_notrap_nonpresent_pte;
+ return pte & PT_PRESENT_MASK && !is_mmio_spte(pte);
}
static int is_large_pte(u64 pte)
@@ -246,11 +276,6 @@ static int is_large_pte(u64 pte)
return pte & PT_PAGE_SIZE_MASK;
}
-static int is_writable_pte(unsigned long pte)
-{
- return pte & PT_WRITABLE_MASK;
-}
-
static int is_dirty_gpte(unsigned long pte)
{
return pte & PT_DIRTY_MASK;
@@ -282,26 +307,154 @@ static gfn_t pse36_gfn_delta(u32 gpte)
return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
+#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
- set_64bit(sptep, spte);
+ *sptep = spte;
}
-static u64 __xchg_spte(u64 *sptep, u64 new_spte)
+static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
-#ifdef CONFIG_X86_64
- return xchg(sptep, new_spte);
+ *sptep = spte;
+}
+
+static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
+{
+ return xchg(sptep, spte);
+}
+
+static u64 __get_spte_lockless(u64 *sptep)
+{
+ return ACCESS_ONCE(*sptep);
+}
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+ /* It is valid if the spte is zapped. */
+ return spte == 0ull;
+}
#else
- u64 old_spte;
+union split_spte {
+ struct {
+ u32 spte_low;
+ u32 spte_high;
+ };
+ u64 spte;
+};
- do {
- old_spte = *sptep;
- } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
+static void count_spte_clear(u64 *sptep, u64 spte)
+{
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
- return old_spte;
-#endif
+ if (is_shadow_present_pte(spte))
+ return;
+
+ /* Ensure the spte is completely set before we increase the count */
+ smp_wmb();
+ sp->clear_spte_count++;
+}
+
+static void __set_spte(u64 *sptep, u64 spte)
+{
+ union split_spte *ssptep, sspte;
+
+ ssptep = (union split_spte *)sptep;
+ sspte = (union split_spte)spte;
+
+ ssptep->spte_high = sspte.spte_high;
+
+ /*
+ * If we map the spte from nonpresent to present, we should store
+ * the high bits first and then set the present bit, so the CPU
+ * cannot fetch this spte while we are still setting it.
+ */
+ smp_wmb();
+
+ ssptep->spte_low = sspte.spte_low;
}
+static void __update_clear_spte_fast(u64 *sptep, u64 spte)
+{
+ union split_spte *ssptep, sspte;
+
+ ssptep = (union split_spte *)sptep;
+ sspte = (union split_spte)spte;
+
+ ssptep->spte_low = sspte.spte_low;
+
+ /*
+ * If we map the spte from present to nonpresent, we should clear
+ * the present bit first so the vcpu cannot fetch the old high bits.
+ */
+ smp_wmb();
+
+ ssptep->spte_high = sspte.spte_high;
+ count_spte_clear(sptep, spte);
+}
+
+static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
+{
+ union split_spte *ssptep, sspte, orig;
+
+ ssptep = (union split_spte *)sptep;
+ sspte = (union split_spte)spte;
+
+ /* xchg acts as a barrier before the setting of the high bits */
+ orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
+ orig.spte_high = ssptep->spte_high = sspte.spte_high;
+ count_spte_clear(sptep, spte);
+
+ return orig.spte;
+}
+
+/*
+ * The idea of reading the spte in this lightweight, lockless way on
+ * x86_32 comes from gup_get_pte() (arch/x86/mm/gup.c).
+ * The difference is that we cannot catch the spte TLB flush if we
+ * leave guest mode, so we emulate it by increasing clear_spte_count
+ * whenever an spte is cleared.
+ */
+static u64 __get_spte_lockless(u64 *sptep)
+{
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+ union split_spte spte, *orig = (union split_spte *)sptep;
+ int count;
+
+retry:
+ count = sp->clear_spte_count;
+ smp_rmb();
+
+ spte.spte_low = orig->spte_low;
+ smp_rmb();
+
+ spte.spte_high = orig->spte_high;
+ smp_rmb();
+
+ if (unlikely(spte.spte_low != orig->spte_low ||
+ count != sp->clear_spte_count))
+ goto retry;
+
+ return spte.spte;
+}
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+ union split_spte sspte = (union split_spte)spte;
+ u32 high_mmio_mask = shadow_mmio_mask >> 32;
+
+ /* It is valid if the spte is zapped. */
+ if (spte == 0ull)
+ return true;
+
+ /* It is valid if the spte is being zapped. */
+ if (sspte.spte_low == 0ull &&
+ (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+ return true;
+
+ return false;
+}
+#endif
+
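On 32-bit hosts the 64-bit spte cannot be read atomically, so the code above splits it into two 32-bit halves, orders the stores as described in the comments, and counts spte clears so a lockless reader can detect that it raced with a teardown. A rough standalone model of that scheme is sketched below, with C11 atomics standing in for smp_wmb()/smp_rmb() and an invented struct; it is illustrative only, not the kernel implementation:

#include <stdatomic.h>
#include <stdint.h>

struct split_spte_model {
	_Atomic uint32_t low;		/* carries the present bit */
	_Atomic uint32_t high;
	_Atomic unsigned clear_count;	/* stands in for sp->clear_spte_count */
};

/* nonpresent -> present: publish the high half first, then the low half */
static void model_spte_set(struct split_spte_model *s, uint64_t val)
{
	atomic_store_explicit(&s->high, (uint32_t)(val >> 32),
			      memory_order_relaxed);
	atomic_store_explicit(&s->low, (uint32_t)val, memory_order_release);
}

/* present -> nonpresent: clear the low half first, then the high half */
static void model_spte_clear(struct split_spte_model *s)
{
	atomic_store_explicit(&s->low, 0, memory_order_relaxed);
	atomic_store_explicit(&s->high, 0, memory_order_release);
	atomic_fetch_add_explicit(&s->clear_count, 1, memory_order_release);
}

/* lockless read: retry if the low half or the clear count moved under us */
static uint64_t model_spte_get_lockless(struct split_spte_model *s)
{
	uint32_t lo, hi;
	unsigned count;

	do {
		count = atomic_load_explicit(&s->clear_count, memory_order_acquire);
		lo = atomic_load_explicit(&s->low, memory_order_acquire);
		hi = atomic_load_explicit(&s->high, memory_order_acquire);
	} while (lo != atomic_load_explicit(&s->low, memory_order_acquire) ||
		 count != atomic_load_explicit(&s->clear_count, memory_order_acquire));

	return ((uint64_t)hi << 32) | lo;
}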
static bool spte_has_volatile_bits(u64 spte)
{
if (!shadow_accessed_mask)
@@ -322,12 +475,30 @@ static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
return (old_spte & bit_mask) && !(new_spte & bit_mask);
}
-static void update_spte(u64 *sptep, u64 new_spte)
+/* Rules for using mmu_spte_set:
+ * Set the sptep from nonpresent to present.
+ * Note: the sptep being assigned *must* be either not present
+ * or in a state where the hardware will not attempt to update
+ * the spte.
+ */
+static void mmu_spte_set(u64 *sptep, u64 new_spte)
+{
+ WARN_ON(is_shadow_present_pte(*sptep));
+ __set_spte(sptep, new_spte);
+}
+
+/* Rules for using mmu_spte_update:
+ * Update the state bits; the mapped pfn is not changed.
+ */
+static void mmu_spte_update(u64 *sptep, u64 new_spte)
{
u64 mask, old_spte = *sptep;
WARN_ON(!is_rmap_spte(new_spte));
+ if (!is_shadow_present_pte(old_spte))
+ return mmu_spte_set(sptep, new_spte);
+
new_spte |= old_spte & shadow_dirty_mask;
mask = shadow_accessed_mask;
@@ -335,9 +506,9 @@ static void update_spte(u64 *sptep, u64 new_spte)
mask |= shadow_dirty_mask;
if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
- __set_spte(sptep, new_spte);
+ __update_clear_spte_fast(sptep, new_spte);
else
- old_spte = __xchg_spte(sptep, new_spte);
+ old_spte = __update_clear_spte_slow(sptep, new_spte);
if (!shadow_accessed_mask)
return;
@@ -348,6 +519,64 @@ static void update_spte(u64 *sptep, u64 new_spte)
kvm_set_pfn_dirty(spte_to_pfn(old_spte));
}
+/*
+ * Rules for using mmu_spte_clear_track_bits:
+ * It sets the sptep from present to nonpresent and tracks the
+ * state bits; it is used to clear a last-level sptep.
+ */
+static int mmu_spte_clear_track_bits(u64 *sptep)
+{
+ pfn_t pfn;
+ u64 old_spte = *sptep;
+
+ if (!spte_has_volatile_bits(old_spte))
+ __update_clear_spte_fast(sptep, 0ull);
+ else
+ old_spte = __update_clear_spte_slow(sptep, 0ull);
+
+ if (!is_rmap_spte(old_spte))
+ return 0;
+
+ pfn = spte_to_pfn(old_spte);
+ if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
+ kvm_set_pfn_accessed(pfn);
+ if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+ kvm_set_pfn_dirty(pfn);
+ return 1;
+}
+
+/*
+ * Rules for using mmu_spte_clear_no_track:
+ * Directly clear the spte without caring about its state bits;
+ * it is used on upper-level sptes, which need no such tracking.
+ */
+static void mmu_spte_clear_no_track(u64 *sptep)
+{
+ __update_clear_spte_fast(sptep, 0ull);
+}
+
+static u64 mmu_spte_get_lockless(u64 *sptep)
+{
+ return __get_spte_lockless(sptep);
+}
+
+static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
+{
+ rcu_read_lock();
+ atomic_inc(&vcpu->kvm->arch.reader_counter);
+
+ /* Increase the counter before walking shadow page table */
+ smp_mb__after_atomic_inc();
+}
+
+static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
+{
+ /* Decrease the counter after the shadow page table walk has finished */
+ smp_mb__before_atomic_dec();
+ atomic_dec(&vcpu->kvm->arch.reader_counter);
+ rcu_read_unlock();
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
struct kmem_cache *base_cache, int min)
{
@@ -397,12 +626,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
int r;
- r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
- pte_chain_cache, 4);
- if (r)
- goto out;
- r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
- rmap_desc_cache, 4 + PTE_PREFETCH_NUM);
+ r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+ pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
if (r)
goto out;
r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
@@ -416,8 +641,8 @@ out:
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
- mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
- mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
+ mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+ pte_list_desc_cache);
mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
mmu_page_header_cache);
@@ -433,26 +658,15 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
return p;
}
-static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
-{
- return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
- sizeof(struct kvm_pte_chain));
-}
-
-static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
+static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
- kmem_cache_free(pte_chain_cache, pc);
+ return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache,
+ sizeof(struct pte_list_desc));
}
-static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
+static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
- return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
- sizeof(struct kvm_rmap_desc));
-}
-
-static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
-{
- kmem_cache_free(rmap_desc_cache, rd);
+ kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
@@ -498,6 +712,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
linfo = lpage_info_slot(gfn, slot, i);
linfo->write_count += 1;
}
+ kvm->arch.indirect_shadow_pages++;
}
static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -513,6 +728,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
linfo->write_count -= 1;
WARN_ON(linfo->write_count < 0);
}
+ kvm->arch.indirect_shadow_pages--;
}
static int has_wrprotected_page(struct kvm *kvm,
@@ -588,67 +804,42 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
}
/*
- * Take gfn and return the reverse mapping to it.
- */
-
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
-{
- struct kvm_memory_slot *slot;
- struct kvm_lpage_info *linfo;
-
- slot = gfn_to_memslot(kvm, gfn);
- if (likely(level == PT_PAGE_TABLE_LEVEL))
- return &slot->rmap[gfn - slot->base_gfn];
-
- linfo = lpage_info_slot(gfn, slot, level);
-
- return &linfo->rmap_pde;
-}
-
-/*
- * Reverse mapping data structures:
+ * Pte mapping structures:
*
- * If rmapp bit zero is zero, then rmapp point to the shadw page table entry
- * that points to page_address(page).
+ * If pte_list bit zero is zero, then pte_list points to the spte.
*
- * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
- * containing more mappings.
+ * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
+ * pte_list_desc containing more mappings.
*
- * Returns the number of rmap entries before the spte was added or zero if
+ * Returns the number of pte entries before the spte was added or zero if
* the spte was not added.
*
*/
-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
+ unsigned long *pte_list)
{
- struct kvm_mmu_page *sp;
- struct kvm_rmap_desc *desc;
- unsigned long *rmapp;
+ struct pte_list_desc *desc;
int i, count = 0;
- if (!is_rmap_spte(*spte))
- return count;
- sp = page_header(__pa(spte));
- kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
- rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
- if (!*rmapp) {
- rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
- *rmapp = (unsigned long)spte;
- } else if (!(*rmapp & 1)) {
- rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
- desc = mmu_alloc_rmap_desc(vcpu);
- desc->sptes[0] = (u64 *)*rmapp;
+ if (!*pte_list) {
+ rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
+ *pte_list = (unsigned long)spte;
+ } else if (!(*pte_list & 1)) {
+ rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
+ desc = mmu_alloc_pte_list_desc(vcpu);
+ desc->sptes[0] = (u64 *)*pte_list;
desc->sptes[1] = spte;
- *rmapp = (unsigned long)desc | 1;
+ *pte_list = (unsigned long)desc | 1;
++count;
} else {
- rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
- desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
- while (desc->sptes[RMAP_EXT-1] && desc->more) {
+ rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
+ desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+ while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
desc = desc->more;
- count += RMAP_EXT;
+ count += PTE_LIST_EXT;
}
- if (desc->sptes[RMAP_EXT-1]) {
- desc->more = mmu_alloc_rmap_desc(vcpu);
+ if (desc->sptes[PTE_LIST_EXT-1]) {
+ desc->more = mmu_alloc_pte_list_desc(vcpu);
desc = desc->more;
}
for (i = 0; desc->sptes[i]; ++i)
@@ -658,59 +849,78 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
return count;
}
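The encoding that pte_list_add() implements, as described in the comment above — a bare spte pointer while there is exactly one mapping, and a bit-0-tagged pointer to a descriptor chain once there are more — is easy to see in isolation. The following standalone sketch mirrors the add path with plain calloc instead of the mmu memory caches and invented names; it relies on descriptor pointers being at least 2-byte aligned so bit 0 is free to use as a tag:

#include <stdint.h>
#include <stdlib.h>

#define LIST_EXT 4

struct list_desc {
	uint64_t *sptes[LIST_EXT];
	struct list_desc *more;
};

/*
 * *head == 0          : empty list
 * *head, bit 0 clear  : head is the single spte pointer itself
 * *head, bit 0 set    : (*head & ~1) points to a chain of list_desc
 */
static void list_add(unsigned long *head, uint64_t *spte)
{
	struct list_desc *desc;
	int i;

	if (!*head) {					/* 0 -> 1 */
		*head = (unsigned long)spte;
		return;
	}
	if (!(*head & 1)) {				/* 1 -> many */
		desc = calloc(1, sizeof(*desc));	/* error handling omitted */
		desc->sptes[0] = (uint64_t *)*head;
		desc->sptes[1] = spte;
		*head = (unsigned long)desc | 1;
		return;
	}
	desc = (struct list_desc *)(*head & ~1ul);	/* many -> many */
	while (desc->sptes[LIST_EXT - 1] && desc->more)
		desc = desc->more;
	if (desc->sptes[LIST_EXT - 1]) {
		desc->more = calloc(1, sizeof(*desc));
		desc = desc->more;
	}
	for (i = 0; desc->sptes[i]; ++i)
		;
	desc->sptes[i] = spte;
}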
-static void rmap_desc_remove_entry(unsigned long *rmapp,
- struct kvm_rmap_desc *desc,
- int i,
- struct kvm_rmap_desc *prev_desc)
+static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
+{
+ struct pte_list_desc *desc;
+ u64 *prev_spte;
+ int i;
+
+ if (!*pte_list)
+ return NULL;
+ else if (!(*pte_list & 1)) {
+ if (!spte)
+ return (u64 *)*pte_list;
+ return NULL;
+ }
+ desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+ prev_spte = NULL;
+ while (desc) {
+ for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
+ if (prev_spte == spte)
+ return desc->sptes[i];
+ prev_spte = desc->sptes[i];
+ }
+ desc = desc->more;
+ }
+ return NULL;
+}
+
+static void
+pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
+ int i, struct pte_list_desc *prev_desc)
{
int j;
- for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
+ for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
;
desc->sptes[i] = desc->sptes[j];
desc->sptes[j] = NULL;
if (j != 0)
return;
if (!prev_desc && !desc->more)
- *rmapp = (unsigned long)desc->sptes[0];
+ *pte_list = (unsigned long)desc->sptes[0];
else
if (prev_desc)
prev_desc->more = desc->more;
else
- *rmapp = (unsigned long)desc->more | 1;
- mmu_free_rmap_desc(desc);
+ *pte_list = (unsigned long)desc->more | 1;
+ mmu_free_pte_list_desc(desc);
}
-static void rmap_remove(struct kvm *kvm, u64 *spte)
+static void pte_list_remove(u64 *spte, unsigned long *pte_list)
{
- struct kvm_rmap_desc *desc;
- struct kvm_rmap_desc *prev_desc;
- struct kvm_mmu_page *sp;
- gfn_t gfn;
- unsigned long *rmapp;
+ struct pte_list_desc *desc;
+ struct pte_list_desc *prev_desc;
int i;
- sp = page_header(__pa(spte));
- gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
- rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
- if (!*rmapp) {
- printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
+ if (!*pte_list) {
+ printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
BUG();
- } else if (!(*rmapp & 1)) {
- rmap_printk("rmap_remove: %p 1->0\n", spte);
- if ((u64 *)*rmapp != spte) {
- printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte);
+ } else if (!(*pte_list & 1)) {
+ rmap_printk("pte_list_remove: %p 1->0\n", spte);
+ if ((u64 *)*pte_list != spte) {
+ printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
BUG();
}
- *rmapp = 0;
+ *pte_list = 0;
} else {
- rmap_printk("rmap_remove: %p many->many\n", spte);
- desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
+ rmap_printk("pte_list_remove: %p many->many\n", spte);
+ desc = (struct pte_list_desc *)(*pte_list & ~1ul);
prev_desc = NULL;
while (desc) {
- for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
+ for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
if (desc->sptes[i] == spte) {
- rmap_desc_remove_entry(rmapp,
+ pte_list_desc_remove_entry(pte_list,
desc, i,
prev_desc);
return;
@@ -718,62 +928,80 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
prev_desc = desc;
desc = desc->more;
}
- pr_err("rmap_remove: %p many->many\n", spte);
+ pr_err("pte_list_remove: %p many->many\n", spte);
BUG();
}
}
-static int set_spte_track_bits(u64 *sptep, u64 new_spte)
+typedef void (*pte_list_walk_fn) (u64 *spte);
+static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
{
- pfn_t pfn;
- u64 old_spte = *sptep;
+ struct pte_list_desc *desc;
+ int i;
- if (!spte_has_volatile_bits(old_spte))
- __set_spte(sptep, new_spte);
- else
- old_spte = __xchg_spte(sptep, new_spte);
+ if (!*pte_list)
+ return;
- if (!is_rmap_spte(old_spte))
- return 0;
+ if (!(*pte_list & 1))
+ return fn((u64 *)*pte_list);
- pfn = spte_to_pfn(old_spte);
- if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
- kvm_set_pfn_accessed(pfn);
- if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
- kvm_set_pfn_dirty(pfn);
- return 1;
+ desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+ while (desc) {
+ for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
+ fn(desc->sptes[i]);
+ desc = desc->more;
+ }
}
-static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
+/*
+ * Take gfn and return the reverse mapping to it.
+ */
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
- if (set_spte_track_bits(sptep, new_spte))
- rmap_remove(kvm, sptep);
+ struct kvm_memory_slot *slot;
+ struct kvm_lpage_info *linfo;
+
+ slot = gfn_to_memslot(kvm, gfn);
+ if (likely(level == PT_PAGE_TABLE_LEVEL))
+ return &slot->rmap[gfn - slot->base_gfn];
+
+ linfo = lpage_info_slot(gfn, slot, level);
+
+ return &linfo->rmap_pde;
+}
+
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+{
+ struct kvm_mmu_page *sp;
+ unsigned long *rmapp;
+
+ sp = page_header(__pa(spte));
+ kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
+ rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+ return pte_list_add(vcpu, spte, rmapp);
}
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
- struct kvm_rmap_desc *desc;
- u64 *prev_spte;
- int i;
+ return pte_list_next(rmapp, spte);
+}
- if (!*rmapp)
- return NULL;
- else if (!(*rmapp & 1)) {
- if (!spte)
- return (u64 *)*rmapp;
- return NULL;
- }
- desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
- prev_spte = NULL;
- while (desc) {
- for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
- if (prev_spte == spte)
- return desc->sptes[i];
- prev_spte = desc->sptes[i];
- }
- desc = desc->more;
- }
- return NULL;
+static void rmap_remove(struct kvm *kvm, u64 *spte)
+{
+ struct kvm_mmu_page *sp;
+ gfn_t gfn;
+ unsigned long *rmapp;
+
+ sp = page_header(__pa(spte));
+ gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+ rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
+ pte_list_remove(spte, rmapp);
+}
+
+static void drop_spte(struct kvm *kvm, u64 *sptep)
+{
+ if (mmu_spte_clear_track_bits(sptep))
+ rmap_remove(kvm, sptep);
}
static int rmap_write_protect(struct kvm *kvm, u64 gfn)
@@ -790,7 +1018,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
if (is_writable_pte(*spte)) {
- update_spte(spte, *spte & ~PT_WRITABLE_MASK);
+ mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
write_protected = 1;
}
spte = rmap_next(kvm, rmapp, spte);
@@ -807,8 +1035,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
if (is_writable_pte(*spte)) {
- drop_spte(kvm, spte,
- shadow_trap_nonpresent_pte);
+ drop_spte(kvm, spte);
--kvm->stat.lpages;
spte = NULL;
write_protected = 1;
@@ -829,7 +1056,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
while ((spte = rmap_next(kvm, rmapp, NULL))) {
BUG_ON(!(*spte & PT_PRESENT_MASK));
rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
- drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
+ drop_spte(kvm, spte);
need_tlb_flush = 1;
}
return need_tlb_flush;
@@ -851,7 +1078,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
need_flush = 1;
if (pte_write(*ptep)) {
- drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
+ drop_spte(kvm, spte);
spte = rmap_next(kvm, rmapp, NULL);
} else {
new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
@@ -860,7 +1087,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~SPTE_HOST_WRITEABLE;
new_spte &= ~shadow_accessed_mask;
- set_spte_track_bits(spte, new_spte);
+ mmu_spte_clear_track_bits(spte);
+ mmu_spte_set(spte, new_spte);
spte = rmap_next(kvm, rmapp, spte);
}
}
@@ -1032,151 +1260,89 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}
-static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+/*
+ * Remove the sp from the shadow page cache; after this call the
+ * sp can no longer be found in the cache, but its shadow page
+ * table is still valid.
+ * It must be called under the protection of the mmu lock.
+ */
+static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp)
{
ASSERT(is_empty_shadow_page(sp->spt));
hlist_del(&sp->hash_link);
- list_del(&sp->link);
- free_page((unsigned long)sp->spt);
if (!sp->role.direct)
free_page((unsigned long)sp->gfns);
- kmem_cache_free(mmu_page_header_cache, sp);
- kvm_mod_used_mmu_pages(kvm, -1);
}
-static unsigned kvm_page_table_hashfn(gfn_t gfn)
+/*
+ * Free the shadow page table and the sp; this can be done
+ * outside the protection of the mmu lock.
+ */
+static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
- return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
+ list_del(&sp->link);
+ free_page((unsigned long)sp->spt);
+ kmem_cache_free(mmu_page_header_cache, sp);
}
-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
- u64 *parent_pte, int direct)
+static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
- struct kvm_mmu_page *sp;
-
- sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
- sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
- if (!direct)
- sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
- PAGE_SIZE);
- set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
- list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
- bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
- sp->multimapped = 0;
- sp->parent_pte = parent_pte;
- kvm_mod_used_mmu_pages(vcpu->kvm, +1);
- return sp;
+ return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *parent_pte)
{
- struct kvm_pte_chain *pte_chain;
- struct hlist_node *node;
- int i;
-
if (!parent_pte)
return;
- if (!sp->multimapped) {
- u64 *old = sp->parent_pte;
- if (!old) {
- sp->parent_pte = parent_pte;
- return;
- }
- sp->multimapped = 1;
- pte_chain = mmu_alloc_pte_chain(vcpu);
- INIT_HLIST_HEAD(&sp->parent_ptes);
- hlist_add_head(&pte_chain->link, &sp->parent_ptes);
- pte_chain->parent_ptes[0] = old;
- }
- hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
- if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
- continue;
- for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
- if (!pte_chain->parent_ptes[i]) {
- pte_chain->parent_ptes[i] = parent_pte;
- return;
- }
- }
- pte_chain = mmu_alloc_pte_chain(vcpu);
- BUG_ON(!pte_chain);
- hlist_add_head(&pte_chain->link, &sp->parent_ptes);
- pte_chain->parent_ptes[0] = parent_pte;
+ pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
u64 *parent_pte)
{
- struct kvm_pte_chain *pte_chain;
- struct hlist_node *node;
- int i;
-
- if (!sp->multimapped) {
- BUG_ON(sp->parent_pte != parent_pte);
- sp->parent_pte = NULL;
- return;
- }
- hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
- for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
- if (!pte_chain->parent_ptes[i])
- break;
- if (pte_chain->parent_ptes[i] != parent_pte)
- continue;
- while (i + 1 < NR_PTE_CHAIN_ENTRIES
- && pte_chain->parent_ptes[i + 1]) {
- pte_chain->parent_ptes[i]
- = pte_chain->parent_ptes[i + 1];
- ++i;
- }
- pte_chain->parent_ptes[i] = NULL;
- if (i == 0) {
- hlist_del(&pte_chain->link);
- mmu_free_pte_chain(pte_chain);
- if (hlist_empty(&sp->parent_ptes)) {
- sp->multimapped = 0;
- sp->parent_pte = NULL;
- }
- }
- return;
- }
- BUG();
+ pte_list_remove(parent_pte, &sp->parent_ptes);
}
-static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
+static void drop_parent_pte(struct kvm_mmu_page *sp,
+ u64 *parent_pte)
{
- struct kvm_pte_chain *pte_chain;
- struct hlist_node *node;
- struct kvm_mmu_page *parent_sp;
- int i;
-
- if (!sp->multimapped && sp->parent_pte) {
- parent_sp = page_header(__pa(sp->parent_pte));
- fn(parent_sp, sp->parent_pte);
- return;
- }
-
- hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
- for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
- u64 *spte = pte_chain->parent_ptes[i];
+ mmu_page_remove_parent_pte(sp, parent_pte);
+ mmu_spte_clear_no_track(parent_pte);
+}
- if (!spte)
- break;
- parent_sp = page_header(__pa(spte));
- fn(parent_sp, spte);
- }
+static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
+ u64 *parent_pte, int direct)
+{
+ struct kvm_mmu_page *sp;
+ sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache,
+ sizeof *sp);
+ sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+ if (!direct)
+ sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
+ PAGE_SIZE);
+ set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+ list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+ bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+ sp->parent_ptes = 0;
+ mmu_page_add_parent_pte(vcpu, sp, parent_pte);
+ kvm_mod_used_mmu_pages(vcpu->kvm, +1);
+ return sp;
}
-static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
+static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
- mmu_parent_walk(sp, mark_unsync);
+ pte_list_walk(&sp->parent_ptes, mark_unsync);
}
-static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
+static void mark_unsync(u64 *spte)
{
+ struct kvm_mmu_page *sp;
unsigned int index;
+ sp = page_header(__pa(spte));
index = spte - sp->spt;
if (__test_and_set_bit(index, sp->unsync_child_bitmap))
return;
@@ -1185,15 +1351,6 @@ static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
kvm_mmu_mark_parents_unsync(sp);
}
-static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp)
-{
- int i;
-
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
- sp->spt[i] = shadow_trap_nonpresent_pte;
-}
-
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
@@ -1475,6 +1632,14 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
}
}
+static void init_shadow_page_table(struct kvm_mmu_page *sp)
+{
+ int i;
+
+ for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+ sp->spt[i] = 0ull;
+}
+
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
gfn_t gfn,
gva_t gaddr,
@@ -1537,10 +1702,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
account_shadowed(vcpu->kvm, gfn);
}
- if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
- vcpu->arch.mmu.prefetch_page(vcpu, sp);
- else
- nonpaging_prefetch_page(vcpu, sp);
+ init_shadow_page_table(sp);
trace_kvm_mmu_get_page(sp, true);
return sp;
}
@@ -1572,21 +1734,28 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
if (iterator->level < PT_PAGE_TABLE_LEVEL)
return false;
- if (iterator->level == PT_PAGE_TABLE_LEVEL)
- if (is_large_pte(*iterator->sptep))
- return false;
-
iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
return true;
}
-static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
+ u64 spte)
{
- iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
+ if (is_last_spte(spte, iterator->level)) {
+ iterator->level = 0;
+ return;
+ }
+
+ iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
--iterator->level;
}
+static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+{
+ return __shadow_walk_next(iterator, *iterator->sptep);
+}
+
static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
u64 spte;
@@ -1594,13 +1763,13 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
spte = __pa(sp->spt)
| PT_PRESENT_MASK | PT_ACCESSED_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
- __set_spte(sptep, spte);
+ mmu_spte_set(sptep, spte);
}
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
if (is_large_pte(*sptep)) {
- drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, sptep);
kvm_flush_remote_tlbs(vcpu->kvm);
}
}
@@ -1622,38 +1791,39 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (child->role.access == direct_access)
return;
- mmu_page_remove_parent_pte(child, sptep);
- __set_spte(sptep, shadow_trap_nonpresent_pte);
+ drop_parent_pte(child, sptep);
kvm_flush_remote_tlbs(vcpu->kvm);
}
}
+static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+ u64 *spte)
+{
+ u64 pte;
+ struct kvm_mmu_page *child;
+
+ pte = *spte;
+ if (is_shadow_present_pte(pte)) {
+ if (is_last_spte(pte, sp->role.level))
+ drop_spte(kvm, spte);
+ else {
+ child = page_header(pte & PT64_BASE_ADDR_MASK);
+ drop_parent_pte(child, spte);
+ }
+ } else if (is_mmio_spte(pte))
+ mmu_spte_clear_no_track(spte);
+
+ if (is_large_pte(pte))
+ --kvm->stat.lpages;
+}
+
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *sp)
{
unsigned i;
- u64 *pt;
- u64 ent;
-
- pt = sp->spt;
-
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
- ent = pt[i];
-
- if (is_shadow_present_pte(ent)) {
- if (!is_last_spte(ent, sp->role.level)) {
- ent &= PT64_BASE_ADDR_MASK;
- mmu_page_remove_parent_pte(page_header(ent),
- &pt[i]);
- } else {
- if (is_large_pte(ent))
- --kvm->stat.lpages;
- drop_spte(kvm, &pt[i],
- shadow_trap_nonpresent_pte);
- }
- }
- pt[i] = shadow_trap_nonpresent_pte;
- }
+
+ for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
+ mmu_page_zap_pte(kvm, sp, sp->spt + i);
}
static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
@@ -1674,20 +1844,8 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
u64 *parent_pte;
- while (sp->multimapped || sp->parent_pte) {
- if (!sp->multimapped)
- parent_pte = sp->parent_pte;
- else {
- struct kvm_pte_chain *chain;
-
- chain = container_of(sp->parent_ptes.first,
- struct kvm_pte_chain, link);
- parent_pte = chain->parent_ptes[0];
- }
- BUG_ON(!parent_pte);
- kvm_mmu_put_page(sp, parent_pte);
- __set_spte(parent_pte, shadow_trap_nonpresent_pte);
- }
+ while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL)))
+ drop_parent_pte(sp, parent_pte);
}
static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -1734,6 +1892,7 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
/* Count self */
ret++;
list_move(&sp->link, invalid_list);
+ kvm_mod_used_mmu_pages(kvm, -1);
} else {
list_move(&sp->link, &kvm->arch.active_mmu_pages);
kvm_reload_remote_mmus(kvm);
@@ -1744,6 +1903,30 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
return ret;
}
+static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
+{
+ struct kvm_mmu_page *sp;
+
+ list_for_each_entry(sp, invalid_list, link)
+ kvm_mmu_isolate_page(sp);
+}
+
+static void free_pages_rcu(struct rcu_head *head)
+{
+ struct kvm_mmu_page *next, *sp;
+
+ sp = container_of(head, struct kvm_mmu_page, rcu);
+ while (sp) {
+ if (!list_empty(&sp->link))
+ next = list_first_entry(&sp->link,
+ struct kvm_mmu_page, link);
+ else
+ next = NULL;
+ kvm_mmu_free_page(sp);
+ sp = next;
+ }
+}
+
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
@@ -1754,10 +1937,21 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
kvm_flush_remote_tlbs(kvm);
+ if (atomic_read(&kvm->arch.reader_counter)) {
+ kvm_mmu_isolate_pages(invalid_list);
+ sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
+ list_del_init(invalid_list);
+
+ trace_kvm_mmu_delay_free_pages(sp);
+ call_rcu(&sp->rcu, free_pages_rcu);
+ return;
+ }
+
do {
sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
WARN_ON(!sp->role.invalid || sp->root_count);
- kvm_mmu_free_page(kvm, sp);
+ kvm_mmu_isolate_page(sp);
+ kvm_mmu_free_page(sp);
} while (!list_empty(invalid_list));
}
@@ -1783,8 +1977,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
}
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
}
@@ -1833,20 +2027,6 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
__set_bit(slot, sp->slot_bitmap);
}
-static void mmu_convert_notrap(struct kvm_mmu_page *sp)
-{
- int i;
- u64 *pt = sp->spt;
-
- if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
- return;
-
- for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
- if (pt[i] == shadow_notrap_nonpresent_pte)
- __set_spte(&pt[i], shadow_trap_nonpresent_pte);
- }
-}
-
/*
* The function is based on mtrr_type_lookup() in
* arch/x86/kernel/cpu/mtrr/generic.c
@@ -1959,7 +2139,6 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
sp->unsync = 1;
kvm_mmu_mark_parents_unsync(sp);
- mmu_convert_notrap(sp);
}
static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -2002,13 +2181,16 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pte_access, int user_fault,
- int write_fault, int dirty, int level,
+ int write_fault, int level,
gfn_t gfn, pfn_t pfn, bool speculative,
bool can_unsync, bool host_writable)
{
u64 spte, entry = *sptep;
int ret = 0;
+ if (set_mmio_spte(sptep, gfn, pfn, pte_access))
+ return 0;
+
/*
* We don't set the accessed bit, since we sometimes want to see
* whether the guest actually used the pte (in order to detect
@@ -2017,8 +2199,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte = PT_PRESENT_MASK;
if (!speculative)
spte |= shadow_accessed_mask;
- if (!dirty)
- pte_access &= ~ACC_WRITE_MASK;
+
if (pte_access & ACC_EXEC_MASK)
spte |= shadow_x_mask;
else
@@ -2045,15 +2226,24 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (level > PT_PAGE_TABLE_LEVEL &&
has_wrprotected_page(vcpu->kvm, gfn, level)) {
ret = 1;
- drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, sptep);
goto done;
}
spte |= PT_WRITABLE_MASK;
if (!vcpu->arch.mmu.direct_map
- && !(pte_access & ACC_WRITE_MASK))
+ && !(pte_access & ACC_WRITE_MASK)) {
spte &= ~PT_USER_MASK;
+ /*
+ * If we converted a user page to a kernel page so that the
+ * kernel can write to it when cr0.wp=0, then we should also
+ * prevent the kernel from executing it if SMEP is enabled.
+ */
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+ spte |= PT64_NX_MASK;
+ }
/*
* Optimization: for pte sync, if spte was writable the hash
@@ -2078,7 +2268,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
mark_page_dirty(vcpu->kvm, gfn);
set_pte:
- update_spte(sptep, spte);
+ mmu_spte_update(sptep, spte);
/*
* If we overwrite a writable spte with a read-only one we
* should flush remote TLBs. Otherwise rmap_write_protect
@@ -2093,8 +2283,8 @@ done:
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned pt_access, unsigned pte_access,
- int user_fault, int write_fault, int dirty,
- int *ptwrite, int level, gfn_t gfn,
+ int user_fault, int write_fault,
+ int *emulate, int level, gfn_t gfn,
pfn_t pfn, bool speculative,
bool host_writable)
{
@@ -2117,26 +2307,28 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
u64 pte = *sptep;
child = page_header(pte & PT64_BASE_ADDR_MASK);
- mmu_page_remove_parent_pte(child, sptep);
- __set_spte(sptep, shadow_trap_nonpresent_pte);
+ drop_parent_pte(child, sptep);
kvm_flush_remote_tlbs(vcpu->kvm);
} else if (pfn != spte_to_pfn(*sptep)) {
pgprintk("hfn old %llx new %llx\n",
spte_to_pfn(*sptep), pfn);
- drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, sptep);
kvm_flush_remote_tlbs(vcpu->kvm);
} else
was_rmapped = 1;
}
if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
- dirty, level, gfn, pfn, speculative, true,
+ level, gfn, pfn, speculative, true,
host_writable)) {
if (write_fault)
- *ptwrite = 1;
+ *emulate = 1;
kvm_mmu_flush_tlb(vcpu);
}
+ if (unlikely(is_mmio_spte(*sptep) && emulate))
+ *emulate = 1;
+
pgprintk("%s: setting spte %llx\n", __func__, *sptep);
pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
is_large_pte(*sptep)? "2MB" : "4kB",
@@ -2145,11 +2337,13 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (!was_rmapped && is_large_pte(*sptep))
++vcpu->kvm->stat.lpages;
- page_header_update_slot(vcpu->kvm, sptep, gfn);
- if (!was_rmapped) {
- rmap_count = rmap_add(vcpu, sptep, gfn);
- if (rmap_count > RMAP_RECYCLE_THRESHOLD)
- rmap_recycle(vcpu, sptep, gfn);
+ if (is_shadow_present_pte(*sptep)) {
+ page_header_update_slot(vcpu->kvm, sptep, gfn);
+ if (!was_rmapped) {
+ rmap_count = rmap_add(vcpu, sptep, gfn);
+ if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+ rmap_recycle(vcpu, sptep, gfn);
+ }
}
kvm_release_pfn_clean(pfn);
if (speculative) {
@@ -2170,8 +2364,8 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
if (!slot) {
- get_page(bad_page);
- return page_to_pfn(bad_page);
+ get_page(fault_page);
+ return page_to_pfn(fault_page);
}
hva = gfn_to_hva_memslot(slot, gfn);
@@ -2198,7 +2392,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
for (i = 0; i < ret; i++, gfn++, start++)
mmu_set_spte(vcpu, start, ACC_ALL,
- access, 0, 0, 1, NULL,
+ access, 0, 0, NULL,
sp->role.level, gfn,
page_to_pfn(pages[i]), true, true);
@@ -2217,7 +2411,7 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
spte = sp->spt + i;
for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
- if (*spte != shadow_trap_nonpresent_pte || spte == sptep) {
+ if (is_shadow_present_pte(*spte) || spte == sptep) {
if (!start)
continue;
if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
@@ -2254,7 +2448,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
{
struct kvm_shadow_walk_iterator iterator;
struct kvm_mmu_page *sp;
- int pt_write = 0;
+ int emulate = 0;
gfn_t pseudo_gfn;
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
@@ -2262,14 +2456,14 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
unsigned pte_access = ACC_ALL;
mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
- 0, write, 1, &pt_write,
+ 0, write, &emulate,
level, gfn, pfn, prefault, map_writable);
direct_pte_prefetch(vcpu, iterator.sptep);
++vcpu->stat.pf_fixed;
break;
}
- if (*iterator.sptep == shadow_trap_nonpresent_pte) {
+ if (!is_shadow_present_pte(*iterator.sptep)) {
u64 base_addr = iterator.addr;
base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
@@ -2283,14 +2477,14 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
return -ENOMEM;
}
- __set_spte(iterator.sptep,
- __pa(sp->spt)
- | PT_PRESENT_MASK | PT_WRITABLE_MASK
- | shadow_user_mask | shadow_x_mask
- | shadow_accessed_mask);
+ mmu_spte_set(iterator.sptep,
+ __pa(sp->spt)
+ | PT_PRESENT_MASK | PT_WRITABLE_MASK
+ | shadow_user_mask | shadow_x_mask
+ | shadow_accessed_mask);
}
}
- return pt_write;
+ return emulate;
}
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
@@ -2306,16 +2500,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
send_sig_info(SIGBUS, &info, tsk);
}
-static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
{
kvm_release_pfn_clean(pfn);
if (is_hwpoison_pfn(pfn)) {
- kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current);
+ kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
return 0;
- } else if (is_fault_pfn(pfn))
- return -EFAULT;
+ }
- return 1;
+ return -EFAULT;
}
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -2360,6 +2553,30 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
}
}
+static bool mmu_invalid_pfn(pfn_t pfn)
+{
+ return unlikely(is_invalid_pfn(pfn));
+}
+
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ pfn_t pfn, unsigned access, int *ret_val)
+{
+ bool ret = true;
+
+ /* The pfn is invalid; report the error! */
+ if (unlikely(is_invalid_pfn(pfn))) {
+ *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+ goto exit;
+ }
+
+ if (unlikely(is_noslot_pfn(pfn)))
+ vcpu_cache_mmio_info(vcpu, gva, gfn, access);
+
+ ret = false;
+exit:
+ return ret;
+}
+
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, pfn_t *pfn, bool write, bool *writable);
@@ -2394,9 +2611,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+ return r;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2623,6 +2839,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
+ vcpu_clear_mmio_info(vcpu, ~0ul);
trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -2667,6 +2884,94 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
}
+static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+{
+ if (direct)
+ return vcpu_match_mmio_gpa(vcpu, addr);
+
+ return vcpu_match_mmio_gva(vcpu, addr);
+}
+
+
+/*
+ * On direct hosts, the last spte allows only two states
+ * for an mmio page fault:
+ * - It is the mmio spte
+ * - It is zapped or it is being zapped.
+ *
+ * This function completely checks the spte when the last spte
+ * is not the mmio spte.
+ */
+static bool check_direct_spte_mmio_pf(u64 spte)
+{
+ return __check_direct_spte_mmio_pf(spte);
+}
+
+static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+{
+ struct kvm_shadow_walk_iterator iterator;
+ u64 spte = 0ull;
+
+ walk_shadow_page_lockless_begin(vcpu);
+ for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
+ if (!is_shadow_present_pte(spte))
+ break;
+ walk_shadow_page_lockless_end(vcpu);
+
+ return spte;
+}
+
+/*
+ * If it is a real mmio page fault, return 1 and emulate the instruction
+ * directly; return 0 to let the CPU fault again on the address; -1 is
+ * returned if a bug is detected.
+ */
+int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+{
+ u64 spte;
+
+ if (quickly_check_mmio_pf(vcpu, addr, direct))
+ return 1;
+
+ spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
+
+ if (is_mmio_spte(spte)) {
+ gfn_t gfn = get_mmio_spte_gfn(spte);
+ unsigned access = get_mmio_spte_access(spte);
+
+ if (direct)
+ addr = 0;
+
+ trace_handle_mmio_page_fault(addr, gfn, access);
+ vcpu_cache_mmio_info(vcpu, addr, gfn, access);
+ return 1;
+ }
+
+ /*
+ * It's ok if the gva is remapped by other cpus on a shadow-paging guest,
+ * but it's a BUG if the gfn is not an mmio page.
+ */
+ if (direct && !check_direct_spte_mmio_pf(spte))
+ return -1;
+
+ /*
+ * If the page table is zapped by other cpus, let the CPU fault again on
+ * the address.
+ */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
+
+static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
+ u32 error_code, bool direct)
+{
+ int ret;
+
+ ret = handle_mmio_page_fault_common(vcpu, addr, direct);
+ WARN_ON(ret < 0);
+ return ret;
+}
+
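Editor's note: handle_mmio_page_fault_common() is tri-state. A hypothetical caller (illustrative only; the real callers are the page-fault handlers further down in this patch) would dispatch on the result roughly as follows:

    /* Editor's sketch, not part of the patch. */
    static int example_consume_mmio_result(struct kvm_vcpu *vcpu, u64 addr)
    {
            int r = handle_mmio_page_fault_common(vcpu, addr, true);

            if (r < 0)      /* bug: spte is neither an mmio spte nor zapped */
                    return -EFAULT;
            if (r > 0)      /* genuine mmio access: emulate the instruction */
                    return 1;
            return 0;       /* raced with a zap: let the CPU retry the access */
    }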
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
u32 error_code, bool prefault)
{
@@ -2674,6 +2979,10 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
int r;
pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
+
+ if (unlikely(error_code & PFERR_RSVD_MASK))
+ return handle_mmio_page_fault(vcpu, gva, error_code, true);
+
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
@@ -2750,6 +3059,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ if (unlikely(error_code & PFERR_RSVD_MASK))
+ return handle_mmio_page_fault(vcpu, gpa, error_code, true);
+
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
@@ -2767,9 +3079,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+ return r;
+
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
@@ -2800,7 +3112,6 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu,
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->free = nonpaging_free;
- context->prefetch_page = nonpaging_prefetch_page;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -2848,6 +3159,23 @@ static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
}
+static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
+ int *nr_present)
+{
+ if (unlikely(is_mmio_spte(*sptep))) {
+ if (gfn != get_mmio_spte_gfn(*sptep)) {
+ mmu_spte_clear_no_track(sptep);
+ return true;
+ }
+
+ (*nr_present)++;
+ mark_mmio_spte(sptep, gfn, access);
+ return true;
+ }
+
+ return false;
+}
+
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE
@@ -2930,7 +3258,6 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
context->new_cr3 = paging_new_cr3;
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
- context->prefetch_page = paging64_prefetch_page;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
context->update_pte = paging64_update_pte;
@@ -2959,7 +3286,6 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
context->free = paging_free;
- context->prefetch_page = paging32_prefetch_page;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
context->update_pte = paging32_update_pte;
@@ -2984,7 +3310,6 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->new_cr3 = nonpaging_new_cr3;
context->page_fault = tdp_page_fault;
context->free = nonpaging_free;
- context->prefetch_page = nonpaging_prefetch_page;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -3023,6 +3348,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
int r;
+ bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -3037,6 +3363,8 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+ vcpu->arch.mmu.base_role.smep_andnot_wp
+ = smep && !is_write_protection(vcpu);
return r;
}
@@ -3141,27 +3469,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
-static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp,
- u64 *spte)
-{
- u64 pte;
- struct kvm_mmu_page *child;
-
- pte = *spte;
- if (is_shadow_present_pte(pte)) {
- if (is_last_spte(pte, sp->role.level))
- drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
- else {
- child = page_header(pte & PT64_BASE_ADDR_MASK);
- mmu_page_remove_parent_pte(child, spte);
- }
- }
- __set_spte(spte, shadow_trap_nonpresent_pte);
- if (is_large_pte(pte))
- --vcpu->kvm->stat.lpages;
-}
-
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *new)
@@ -3233,6 +3540,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int level, npte, invlpg_counter, r, flooded = 0;
bool remote_flush, local_flush, zap_page;
+ /*
+ * If we don't have indirect shadow pages, it means no page is
+ * write-protected, so we can simply return early.
+ */
+ if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+ return;
+
zap_page = remote_flush = local_flush = false;
offset = offset_in_page(gpa);
@@ -3336,7 +3650,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
spte = &sp->spt[page_offset / sizeof(*spte)];
while (npte--) {
entry = *spte;
- mmu_pte_write_zap_pte(vcpu, sp, spte);
+ mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
!((sp->role.word ^ vcpu->arch.mmu.base_role.word)
& mask.word))
@@ -3380,9 +3694,9 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
- kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
++vcpu->kvm->stat.mmu_recycled;
}
+ kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
@@ -3506,15 +3820,15 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
continue;
if (is_large_pte(pt[i])) {
- drop_spte(kvm, &pt[i],
- shadow_trap_nonpresent_pte);
+ drop_spte(kvm, &pt[i]);
--kvm->stat.lpages;
continue;
}
/* avoid RMW */
if (is_writable_pte(pt[i]))
- update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
+ mmu_spte_update(&pt[i],
+ pt[i] & ~PT_WRITABLE_MASK);
}
}
kvm_flush_remote_tlbs(kvm);
@@ -3590,25 +3904,18 @@ static struct shrinker mmu_shrinker = {
static void mmu_destroy_caches(void)
{
- if (pte_chain_cache)
- kmem_cache_destroy(pte_chain_cache);
- if (rmap_desc_cache)
- kmem_cache_destroy(rmap_desc_cache);
+ if (pte_list_desc_cache)
+ kmem_cache_destroy(pte_list_desc_cache);
if (mmu_page_header_cache)
kmem_cache_destroy(mmu_page_header_cache);
}
int kvm_mmu_module_init(void)
{
- pte_chain_cache = kmem_cache_create("kvm_pte_chain",
- sizeof(struct kvm_pte_chain),
- 0, 0, NULL);
- if (!pte_chain_cache)
- goto nomem;
- rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
- sizeof(struct kvm_rmap_desc),
+ pte_list_desc_cache = kmem_cache_create("pte_list_desc",
+ sizeof(struct pte_list_desc),
0, 0, NULL);
- if (!rmap_desc_cache)
+ if (!pte_list_desc_cache)
goto nomem;
mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
@@ -3775,16 +4082,17 @@ out:
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
struct kvm_shadow_walk_iterator iterator;
+ u64 spte;
int nr_sptes = 0;
- spin_lock(&vcpu->kvm->mmu_lock);
- for_each_shadow_entry(vcpu, addr, iterator) {
- sptes[iterator.level-1] = *iterator.sptep;
+ walk_shadow_page_lockless_begin(vcpu);
+ for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+ sptes[iterator.level-1] = spte;
nr_sptes++;
- if (!is_shadow_present_pte(*iterator.sptep))
+ if (!is_shadow_present_pte(spte))
break;
}
- spin_unlock(&vcpu->kvm->mmu_lock);
+ walk_shadow_page_lockless_end(vcpu);
return nr_sptes;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 7086ca85d3e7..e374db9af021 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -49,6 +49,8 @@
#define PFERR_FETCH_MASK (1U << 4)
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
@@ -76,4 +78,27 @@ static inline int is_present_gpte(unsigned long pte)
return pte & PT_PRESENT_MASK;
}
+static inline int is_writable_pte(unsigned long pte)
+{
+ return pte & PT_WRITABLE_MASK;
+}
+
+static inline bool is_write_protection(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
+}
+
+static inline bool check_write_user_access(struct kvm_vcpu *vcpu,
+ bool write_fault, bool user_fault,
+ unsigned long pte)
+{
+ if (unlikely(write_fault && !is_writable_pte(pte)
+ && (user_fault || is_write_protection(vcpu))))
+ return false;
+
+ if (unlikely(user_fault && !(pte & PT_USER_MASK)))
+ return false;
+
+ return true;
+}
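Editor's note (worked example, not from the patch): with cr0.wp=0, a supervisor-mode write to a gpte that lacks PT_WRITABLE_MASK still passes the first check above (write_fault and !is_writable_pte(pte) are true, but user_fault and is_write_protection(vcpu) are both false), so the helper returns true and the walker raises no permission fault. The same access with cr0.wp=1, or any such write from user mode, returns false.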
#endif
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 5f6223b8bcf7..2460a265be23 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -99,18 +99,6 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
"level = %d\n", sp, level);
return;
}
-
- if (*sptep == shadow_notrap_nonpresent_pte) {
- audit_printk(vcpu->kvm, "notrap spte in unsync "
- "sp: %p\n", sp);
- return;
- }
- }
-
- if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
- audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n",
- sp);
- return;
}
if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index b60b4fdb3eda..eed67f34146d 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -196,6 +196,54 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
TP_ARGS(sp)
);
+DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_delay_free_pages,
+ TP_PROTO(struct kvm_mmu_page *sp),
+
+ TP_ARGS(sp)
+);
+
+TRACE_EVENT(
+ mark_mmio_spte,
+ TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
+ TP_ARGS(sptep, gfn, access),
+
+ TP_STRUCT__entry(
+ __field(void *, sptep)
+ __field(gfn_t, gfn)
+ __field(unsigned, access)
+ ),
+
+ TP_fast_assign(
+ __entry->sptep = sptep;
+ __entry->gfn = gfn;
+ __entry->access = access;
+ ),
+
+ TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
+ __entry->access)
+);
+
+TRACE_EVENT(
+ handle_mmio_page_fault,
+ TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
+ TP_ARGS(addr, gfn, access),
+
+ TP_STRUCT__entry(
+ __field(u64, addr)
+ __field(gfn_t, gfn)
+ __field(unsigned, access)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->gfn = gfn;
+ __entry->access = access;
+ ),
+
+ TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
+ __entry->access)
+);
+
TRACE_EVENT(
kvm_mmu_audit,
TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9d03ad4dd5ec..507e2b844cfa 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -101,11 +101,15 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return (ret != orig_pte);
}
-static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
+static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte,
+ bool last)
{
unsigned access;
access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+ if (last && !is_dirty_gpte(gpte))
+ access &= ~ACC_WRITE_MASK;
+
#if PTTYPE == 64
if (vcpu->arch.mmu.nx)
access &= ~(gpte >> PT64_NX_SHIFT);
@@ -113,6 +117,24 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
return access;
}
+static bool FNAME(is_last_gpte)(struct guest_walker *walker,
+ struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ pt_element_t gpte)
+{
+ if (walker->level == PT_PAGE_TABLE_LEVEL)
+ return true;
+
+ if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
+ (PTTYPE == 64 || is_pse(vcpu)))
+ return true;
+
+ if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
+ (mmu->root_level == PT64_ROOT_LEVEL))
+ return true;
+
+ return false;
+}
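Editor's note: as a concrete example of FNAME(is_last_gpte)(), a 2MB page under 64-bit or PAE paging is described by a directory-level gpte with the large-page bit set, so the walk terminates at PT_DIRECTORY_LEVEL (for 32-bit non-PAE guests the same case additionally requires CR4.PSE, hence the is_pse() check); a 1GB page likewise terminates at PT_PDPE_LEVEL, which is only possible when the mmu's root level is PT64_ROOT_LEVEL.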
+
/*
* Fetch a guest pte for a guest virtual address
*/
@@ -125,18 +147,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
gfn_t table_gfn;
unsigned index, pt_access, uninitialized_var(pte_access);
gpa_t pte_gpa;
- bool eperm, present, rsvd_fault;
- int offset, write_fault, user_fault, fetch_fault;
-
- write_fault = access & PFERR_WRITE_MASK;
- user_fault = access & PFERR_USER_MASK;
- fetch_fault = access & PFERR_FETCH_MASK;
+ bool eperm;
+ int offset;
+ const int write_fault = access & PFERR_WRITE_MASK;
+ const int user_fault = access & PFERR_USER_MASK;
+ const int fetch_fault = access & PFERR_FETCH_MASK;
+ u16 errcode = 0;
trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
fetch_fault);
-walk:
- present = true;
- eperm = rsvd_fault = false;
+retry_walk:
+ eperm = false;
walker->level = mmu->root_level;
pte = mmu->get_cr3(vcpu);
@@ -144,10 +165,8 @@ walk:
if (walker->level == PT32E_ROOT_LEVEL) {
pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
- if (!is_present_gpte(pte)) {
- present = false;
+ if (!is_present_gpte(pte))
goto error;
- }
--walker->level;
}
#endif
@@ -170,42 +189,31 @@ walk:
real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
PFERR_USER_MASK|PFERR_WRITE_MASK);
- if (unlikely(real_gfn == UNMAPPED_GVA)) {
- present = false;
- break;
- }
+ if (unlikely(real_gfn == UNMAPPED_GVA))
+ goto error;
real_gfn = gpa_to_gfn(real_gfn);
host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
- if (unlikely(kvm_is_error_hva(host_addr))) {
- present = false;
- break;
- }
+ if (unlikely(kvm_is_error_hva(host_addr)))
+ goto error;
ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
- if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
- present = false;
- break;
- }
+ if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+ goto error;
trace_kvm_mmu_paging_element(pte, walker->level);
- if (unlikely(!is_present_gpte(pte))) {
- present = false;
- break;
- }
+ if (unlikely(!is_present_gpte(pte)))
+ goto error;
if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
walker->level))) {
- rsvd_fault = true;
- break;
+ errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+ goto error;
}
- if (unlikely(write_fault && !is_writable_pte(pte)
- && (user_fault || is_write_protection(vcpu))))
- eperm = true;
-
- if (unlikely(user_fault && !(pte & PT_USER_MASK)))
+ if (!check_write_user_access(vcpu, write_fault, user_fault,
+ pte))
eperm = true;
#if PTTYPE == 64
@@ -213,39 +221,35 @@ walk:
eperm = true;
#endif
- if (!eperm && !rsvd_fault
- && unlikely(!(pte & PT_ACCESSED_MASK))) {
+ if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
int ret;
trace_kvm_mmu_set_accessed_bit(table_gfn, index,
sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
pte, pte|PT_ACCESSED_MASK);
- if (unlikely(ret < 0)) {
- present = false;
- break;
- } else if (ret)
- goto walk;
+ if (unlikely(ret < 0))
+ goto error;
+ else if (ret)
+ goto retry_walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_ACCESSED_MASK;
}
- pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
-
walker->ptes[walker->level - 1] = pte;
- if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
- ((walker->level == PT_DIRECTORY_LEVEL) &&
- is_large_pte(pte) &&
- (PTTYPE == 64 || is_pse(vcpu))) ||
- ((walker->level == PT_PDPE_LEVEL) &&
- is_large_pte(pte) &&
- mmu->root_level == PT64_ROOT_LEVEL)) {
+ if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
int lvl = walker->level;
gpa_t real_gpa;
gfn_t gfn;
u32 ac;
+ /* check if the kernel is fetching from a user page */
+ if (unlikely(pte_access & PT_USER_MASK) &&
+ kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+ if (fetch_fault && !user_fault)
+ eperm = true;
+
gfn = gpte_to_gfn_lvl(pte, lvl);
gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
@@ -266,12 +270,14 @@ walk:
break;
}
- pt_access = pte_access;
+ pt_access &= FNAME(gpte_access)(vcpu, pte, false);
--walker->level;
}
- if (unlikely(!present || eperm || rsvd_fault))
+ if (unlikely(eperm)) {
+ errcode |= PFERR_PRESENT_MASK;
goto error;
+ }
if (write_fault && unlikely(!is_dirty_gpte(pte))) {
int ret;
@@ -279,17 +285,17 @@ walk:
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
pte, pte|PT_DIRTY_MASK);
- if (unlikely(ret < 0)) {
- present = false;
+ if (unlikely(ret < 0))
goto error;
- } else if (ret)
- goto walk;
+ else if (ret)
+ goto retry_walk;
mark_page_dirty(vcpu->kvm, table_gfn);
pte |= PT_DIRTY_MASK;
walker->ptes[walker->level - 1] = pte;
}
+ pte_access = pt_access & FNAME(gpte_access)(vcpu, pte, true);
walker->pt_access = pt_access;
walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
@@ -297,19 +303,14 @@ walk:
return 1;
error:
+ errcode |= write_fault | user_fault;
+ if (fetch_fault && (mmu->nx ||
+ kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
+ errcode |= PFERR_FETCH_MASK;
+
walker->fault.vector = PF_VECTOR;
walker->fault.error_code_valid = true;
- walker->fault.error_code = 0;
- if (present)
- walker->fault.error_code |= PFERR_PRESENT_MASK;
-
- walker->fault.error_code |= write_fault | user_fault;
-
- if (fetch_fault && mmu->nx)
- walker->fault.error_code |= PFERR_FETCH_MASK;
- if (rsvd_fault)
- walker->fault.error_code |= PFERR_RSVD_MASK;
-
+ walker->fault.error_code = errcode;
walker->fault.address = addr;
walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
@@ -336,16 +337,11 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
pt_element_t gpte)
{
- u64 nonpresent = shadow_trap_nonpresent_pte;
-
if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
goto no_present;
- if (!is_present_gpte(gpte)) {
- if (!sp->unsync)
- nonpresent = shadow_notrap_nonpresent_pte;
+ if (!is_present_gpte(gpte))
goto no_present;
- }
if (!(gpte & PT_ACCESSED_MASK))
goto no_present;
@@ -353,7 +349,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
return false;
no_present:
- drop_spte(vcpu->kvm, spte, nonpresent);
+ drop_spte(vcpu->kvm, spte);
return true;
}
@@ -369,9 +365,9 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
return;
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
- if (is_error_pfn(pfn)) {
+ if (mmu_invalid_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
return;
}
@@ -381,7 +377,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
* vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
*/
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
- is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
+ NULL, PT_PAGE_TABLE_LEVEL,
gpte_to_gfn(gpte), pfn, true, true);
}
@@ -432,12 +428,11 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
unsigned pte_access;
gfn_t gfn;
pfn_t pfn;
- bool dirty;
if (spte == sptep)
continue;
- if (*spte != shadow_trap_nonpresent_pte)
+ if (is_shadow_present_pte(*spte))
continue;
gpte = gptep[i];
@@ -445,18 +440,18 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
continue;
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
+ true);
gfn = gpte_to_gfn(gpte);
- dirty = is_dirty_gpte(gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
- (pte_access & ACC_WRITE_MASK) && dirty);
- if (is_error_pfn(pfn)) {
+ pte_access & ACC_WRITE_MASK);
+ if (mmu_invalid_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
break;
}
mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
- dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
+ NULL, PT_PAGE_TABLE_LEVEL, gfn,
pfn, true, true);
}
}
@@ -467,12 +462,11 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
int user_fault, int write_fault, int hlevel,
- int *ptwrite, pfn_t pfn, bool map_writable,
+ int *emulate, pfn_t pfn, bool map_writable,
bool prefault)
{
unsigned access = gw->pt_access;
struct kvm_mmu_page *sp = NULL;
- bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
int top_level;
unsigned direct_access;
struct kvm_shadow_walk_iterator it;
@@ -480,9 +474,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (!is_present_gpte(gw->ptes[gw->level - 1]))
return NULL;
- direct_access = gw->pt_access & gw->pte_access;
- if (!dirty)
- direct_access &= ~ACC_WRITE_MASK;
+ direct_access = gw->pte_access;
top_level = vcpu->arch.mmu.root_level;
if (top_level == PT32E_ROOT_LEVEL)
@@ -540,8 +532,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(it.sptep, sp);
}
- mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
- user_fault, write_fault, dirty, ptwrite, it.level,
+ mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
+ user_fault, write_fault, emulate, it.level,
gw->gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
@@ -575,7 +567,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int user_fault = error_code & PFERR_USER_MASK;
struct guest_walker walker;
u64 *sptep;
- int write_pt = 0;
+ int emulate = 0;
int r;
pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
@@ -585,6 +577,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
+ if (unlikely(error_code & PFERR_RSVD_MASK))
+ return handle_mmio_page_fault(vcpu, addr, error_code,
+ mmu_is_nested(vcpu));
+
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
@@ -623,9 +619,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
&map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
+ walker.gfn, pfn, walker.pte_access, &r))
+ return r;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -636,19 +632,19 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (!force_pt_level)
transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
- level, &write_pt, pfn, map_writable, prefault);
+ level, &emulate, pfn, map_writable, prefault);
(void)sptep;
- pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
- sptep, *sptep, write_pt);
+ pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
+ sptep, *sptep, emulate);
- if (!write_pt)
+ if (!emulate)
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
++vcpu->stat.pf_fixed;
trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
spin_unlock(&vcpu->kvm->mmu_lock);
- return write_pt;
+ return emulate;
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
@@ -665,6 +661,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
u64 *sptep;
int need_flush = 0;
+ vcpu_clear_mmio_info(vcpu, gva);
+
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
@@ -688,11 +686,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
if (is_shadow_present_pte(*sptep)) {
if (is_large_pte(*sptep))
--vcpu->kvm->stat.lpages;
- drop_spte(vcpu->kvm, sptep,
- shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, sptep);
need_flush = 1;
- } else
- __set_spte(sptep, shadow_trap_nonpresent_pte);
+ } else if (is_mmio_spte(*sptep))
+ mmu_spte_clear_no_track(sptep);
+
break;
}
@@ -752,36 +750,6 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
return gpa;
}
-static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp)
-{
- int i, j, offset, r;
- pt_element_t pt[256 / sizeof(pt_element_t)];
- gpa_t pte_gpa;
-
- if (sp->role.direct
- || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
- nonpaging_prefetch_page(vcpu, sp);
- return;
- }
-
- pte_gpa = gfn_to_gpa(sp->gfn);
- if (PTTYPE == 32) {
- offset = sp->role.quadrant << PT64_LEVEL_BITS;
- pte_gpa += offset * sizeof(pt_element_t);
- }
-
- for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
- r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
- pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
- for (j = 0; j < ARRAY_SIZE(pt); ++j)
- if (r || is_present_gpte(pt[j]))
- sp->spt[i+j] = shadow_trap_nonpresent_pte;
- else
- sp->spt[i+j] = shadow_notrap_nonpresent_pte;
- }
-}
-
/*
* Using the cached information from sp->gfns is safe because:
* - The spte has a reference to the struct page, so the pfn for a given gfn
@@ -817,7 +785,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
gpa_t pte_gpa;
gfn_t gfn;
- if (!is_shadow_present_pte(sp->spt[i]))
+ if (!sp->spt[i])
continue;
pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
@@ -826,26 +794,30 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
sizeof(pt_element_t)))
return -EINVAL;
- gfn = gpte_to_gfn(gpte);
-
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
vcpu->kvm->tlbs_dirty++;
continue;
}
+ gfn = gpte_to_gfn(gpte);
+ pte_access = sp->role.access;
+ pte_access &= FNAME(gpte_access)(vcpu, gpte, true);
+
+ if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
+ continue;
+
if (gfn != sp->gfns[i]) {
- drop_spte(vcpu->kvm, &sp->spt[i],
- shadow_trap_nonpresent_pte);
+ drop_spte(vcpu->kvm, &sp->spt[i]);
vcpu->kvm->tlbs_dirty++;
continue;
}
nr_present++;
- pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+
host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
- is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
+ PT_PAGE_TABLE_LEVEL, gfn,
spte_to_pfn(sp->spt[i]), true, false,
host_writable);
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 506e4fe23adc..475d1c948501 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1496,11 +1496,14 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
update_cr0_intercept(svm);
}
-static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
+ if (cr4 & X86_CR4_VMXE)
+ return 1;
+
if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
svm_flush_tlb(vcpu);
@@ -1510,6 +1513,7 @@ static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+ return 0;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index db932760ea82..3ff898c104f7 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -675,12 +675,12 @@ TRACE_EVENT(kvm_emulate_insn,
),
TP_fast_assign(
- __entry->rip = vcpu->arch.emulate_ctxt.decode.fetch.start;
+ __entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
- __entry->len = vcpu->arch.emulate_ctxt.decode.eip
- - vcpu->arch.emulate_ctxt.decode.fetch.start;
+ __entry->len = vcpu->arch.emulate_ctxt._eip
+ - vcpu->arch.emulate_ctxt.fetch.start;
memcpy(__entry->insn,
- vcpu->arch.emulate_ctxt.decode.fetch.data,
+ vcpu->arch.emulate_ctxt.fetch.data,
15);
__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
__entry->failed = failed;
@@ -698,6 +698,29 @@ TRACE_EVENT(kvm_emulate_insn,
#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
+TRACE_EVENT(
+ vcpu_match_mmio,
+ TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
+ TP_ARGS(gva, gpa, write, gpa_match),
+
+ TP_STRUCT__entry(
+ __field(gva_t, gva)
+ __field(gpa_t, gpa)
+ __field(bool, write)
+ __field(bool, gpa_match)
+ ),
+
+ TP_fast_assign(
+ __entry->gva = gva;
+ __entry->gpa = gpa;
+ __entry->write = write;
+ __entry->gpa_match = gpa_match;
+ ),
+
+ TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
+ __entry->write ? "Write" : "Read",
+ __entry->gpa_match ? "GPA" : "GVA")
+);
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d48ec60ea421..e65a158dee64 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -43,13 +43,12 @@
#include "trace.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
+#define __ex_clear(x, reg) \
+ ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static int __read_mostly bypass_guest_pf = 1;
-module_param(bypass_guest_pf, bool, S_IRUGO);
-
static int __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
@@ -72,6 +71,14 @@ module_param(vmm_exclusive, bool, S_IRUGO);
static int __read_mostly yield_on_hlt = 1;
module_param(yield_on_hlt, bool, S_IRUGO);
+/*
+ * If nested=1, nested virtualization is supported, i.e., guests may use
+ * VMX and act as hypervisors for their own guests. If nested=0, guests may
+ * not use VMX instructions.
+ */
+static int __read_mostly nested = 0;
+module_param(nested, bool, S_IRUGO);
+
#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
#define KVM_GUEST_CR0_MASK \
@@ -109,6 +116,7 @@ static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
#define NR_AUTOLOAD_MSRS 1
+#define VMCS02_POOL_SIZE 1
struct vmcs {
u32 revision_id;
@@ -116,17 +124,237 @@ struct vmcs {
char data[0];
};
+/*
+ * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
+ * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
+ * loaded on this CPU (so we can clear them if the CPU goes down).
+ */
+struct loaded_vmcs {
+ struct vmcs *vmcs;
+ int cpu;
+ int launched;
+ struct list_head loaded_vmcss_on_cpu_link;
+};
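Editor's note: the launched flag mirrors the VMX rule that a VMCS in the "clear" state must be entered with VMLAUNCH, while subsequent entries use VMRESUME. Keeping it in loaded_vmcs rather than in the vcpu lets a single vcpu alternate between vmcs01 and a nested vmcs02 without losing track of which of the two has already been launched.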
+
struct shared_msr_entry {
unsigned index;
u64 data;
u64 mask;
};
+/*
+ * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
+ * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
+ * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
+ * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
+ * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
+ * More than one of these structures may exist, if L1 runs multiple L2 guests.
+ * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
+ * underlying hardware which will be used to run L2.
+ * This structure is packed to ensure that its layout is identical across
+ * machines (necessary for live migration).
+ * If there are changes in this struct, VMCS12_REVISION must be changed.
+ */
+typedef u64 natural_width;
+struct __packed vmcs12 {
+ /* According to the Intel spec, a VMCS region must start with the
+ * following two fields. Then follow implementation-specific data.
+ */
+ u32 revision_id;
+ u32 abort;
+
+ u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
+ u32 padding[7]; /* room for future expansion */
+
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+ u64 vm_exit_msr_store_addr;
+ u64 vm_exit_msr_load_addr;
+ u64 vm_entry_msr_load_addr;
+ u64 tsc_offset;
+ u64 virtual_apic_page_addr;
+ u64 apic_access_addr;
+ u64 ept_pointer;
+ u64 guest_physical_address;
+ u64 vmcs_link_pointer;
+ u64 guest_ia32_debugctl;
+ u64 guest_ia32_pat;
+ u64 guest_ia32_efer;
+ u64 guest_ia32_perf_global_ctrl;
+ u64 guest_pdptr0;
+ u64 guest_pdptr1;
+ u64 guest_pdptr2;
+ u64 guest_pdptr3;
+ u64 host_ia32_pat;
+ u64 host_ia32_efer;
+ u64 host_ia32_perf_global_ctrl;
+ u64 padding64[8]; /* room for future expansion */
+ /*
+ * To allow migration of L1 (complete with its L2 guests) between
+ * machines of different natural widths (32 or 64 bit), we cannot have
+ * unsigned long fields with no explicit size. We use u64 (aliased
+ * natural_width) instead. Luckily, x86 is little-endian.
+ */
+ natural_width cr0_guest_host_mask;
+ natural_width cr4_guest_host_mask;
+ natural_width cr0_read_shadow;
+ natural_width cr4_read_shadow;
+ natural_width cr3_target_value0;
+ natural_width cr3_target_value1;
+ natural_width cr3_target_value2;
+ natural_width cr3_target_value3;
+ natural_width exit_qualification;
+ natural_width guest_linear_address;
+ natural_width guest_cr0;
+ natural_width guest_cr3;
+ natural_width guest_cr4;
+ natural_width guest_es_base;
+ natural_width guest_cs_base;
+ natural_width guest_ss_base;
+ natural_width guest_ds_base;
+ natural_width guest_fs_base;
+ natural_width guest_gs_base;
+ natural_width guest_ldtr_base;
+ natural_width guest_tr_base;
+ natural_width guest_gdtr_base;
+ natural_width guest_idtr_base;
+ natural_width guest_dr7;
+ natural_width guest_rsp;
+ natural_width guest_rip;
+ natural_width guest_rflags;
+ natural_width guest_pending_dbg_exceptions;
+ natural_width guest_sysenter_esp;
+ natural_width guest_sysenter_eip;
+ natural_width host_cr0;
+ natural_width host_cr3;
+ natural_width host_cr4;
+ natural_width host_fs_base;
+ natural_width host_gs_base;
+ natural_width host_tr_base;
+ natural_width host_gdtr_base;
+ natural_width host_idtr_base;
+ natural_width host_ia32_sysenter_esp;
+ natural_width host_ia32_sysenter_eip;
+ natural_width host_rsp;
+ natural_width host_rip;
+ natural_width paddingl[8]; /* room for future expansion */
+ u32 pin_based_vm_exec_control;
+ u32 cpu_based_vm_exec_control;
+ u32 exception_bitmap;
+ u32 page_fault_error_code_mask;
+ u32 page_fault_error_code_match;
+ u32 cr3_target_count;
+ u32 vm_exit_controls;
+ u32 vm_exit_msr_store_count;
+ u32 vm_exit_msr_load_count;
+ u32 vm_entry_controls;
+ u32 vm_entry_msr_load_count;
+ u32 vm_entry_intr_info_field;
+ u32 vm_entry_exception_error_code;
+ u32 vm_entry_instruction_len;
+ u32 tpr_threshold;
+ u32 secondary_vm_exec_control;
+ u32 vm_instruction_error;
+ u32 vm_exit_reason;
+ u32 vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+ u32 guest_interruptibility_info;
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+ u32 host_ia32_sysenter_cs;
+ u32 padding32[8]; /* room for future expansion */
+ u16 virtual_processor_id;
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+};
+
+/*
+ * VMCS12_REVISION is an arbitrary id that should be changed if the content or
+ * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
+ * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
+ */
+#define VMCS12_REVISION 0x11e57ed0
+
+/*
+ * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
+ * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
+ * the current implementation, 4K is reserved to avoid future complications.
+ */
+#define VMCS12_SIZE 0x1000
+
+/* Used to remember the last vmcs02 used for some recently used vmcs12s */
+struct vmcs02_list {
+ struct list_head list;
+ gpa_t vmptr;
+ struct loaded_vmcs vmcs02;
+};
+
+/*
+ * The nested_vmx structure is part of vcpu_vmx, and holds information we need
+ * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
+ */
+struct nested_vmx {
+ /* Has the level1 guest done vmxon? */
+ bool vmxon;
+
+ /* The guest-physical address of the current VMCS L1 keeps for L2 */
+ gpa_t current_vmptr;
+ /* The host-usable pointer to the above */
+ struct page *current_vmcs12_page;
+ struct vmcs12 *current_vmcs12;
+
+ /* vmcs02_list cache of VMCSs recently used to run L2 guests */
+ struct list_head vmcs02_pool;
+ int vmcs02_num;
+ u64 vmcs01_tsc_offset;
+ /* L2 must run next, and mustn't decide to exit to L1. */
+ bool nested_run_pending;
+ /*
+ * Guest pages referred to in vmcs02 with host-physical pointers, so
+ * we must keep them pinned while L2 runs.
+ */
+ struct page *apic_access_page;
+};
+
struct vcpu_vmx {
struct kvm_vcpu vcpu;
- struct list_head local_vcpus_link;
unsigned long host_rsp;
- int launched;
u8 fail;
u8 cpl;
bool nmi_known_unmasked;
@@ -140,7 +368,14 @@ struct vcpu_vmx {
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
- struct vmcs *vmcs;
+ /*
+ * loaded_vmcs points to the VMCS currently used in this vcpu. For a
+ * non-nested (L1) guest, it always points to vmcs01. For a nested
+ * guest (L2), it points to a different VMCS.
+ */
+ struct loaded_vmcs vmcs01;
+ struct loaded_vmcs *loaded_vmcs;
+ bool __launched; /* temporary, used in vmx_vcpu_run */
struct msr_autoload {
unsigned nr;
struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
@@ -176,6 +411,9 @@ struct vcpu_vmx {
u32 exit_reason;
bool rdtscp_enabled;
+
+ /* Support for a guest hypervisor (nested VMX) */
+ struct nested_vmx nested;
};
enum segment_cache_field {
@@ -192,6 +430,174 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
+#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
+#define FIELD(number, name) [number] = VMCS12_OFFSET(name)
+#define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \
+ [number##_HIGH] = VMCS12_OFFSET(name)+4
+
+static unsigned short vmcs_field_to_offset_table[] = {
+ FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
+ FIELD(GUEST_ES_SELECTOR, guest_es_selector),
+ FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
+ FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
+ FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
+ FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
+ FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
+ FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
+ FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
+ FIELD(HOST_ES_SELECTOR, host_es_selector),
+ FIELD(HOST_CS_SELECTOR, host_cs_selector),
+ FIELD(HOST_SS_SELECTOR, host_ss_selector),
+ FIELD(HOST_DS_SELECTOR, host_ds_selector),
+ FIELD(HOST_FS_SELECTOR, host_fs_selector),
+ FIELD(HOST_GS_SELECTOR, host_gs_selector),
+ FIELD(HOST_TR_SELECTOR, host_tr_selector),
+ FIELD64(IO_BITMAP_A, io_bitmap_a),
+ FIELD64(IO_BITMAP_B, io_bitmap_b),
+ FIELD64(MSR_BITMAP, msr_bitmap),
+ FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
+ FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
+ FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
+ FIELD64(TSC_OFFSET, tsc_offset),
+ FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
+ FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
+ FIELD64(EPT_POINTER, ept_pointer),
+ FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
+ FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
+ FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
+ FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
+ FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
+ FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
+ FIELD64(GUEST_PDPTR0, guest_pdptr0),
+ FIELD64(GUEST_PDPTR1, guest_pdptr1),
+ FIELD64(GUEST_PDPTR2, guest_pdptr2),
+ FIELD64(GUEST_PDPTR3, guest_pdptr3),
+ FIELD64(HOST_IA32_PAT, host_ia32_pat),
+ FIELD64(HOST_IA32_EFER, host_ia32_efer),
+ FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
+ FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
+ FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
+ FIELD(EXCEPTION_BITMAP, exception_bitmap),
+ FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
+ FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
+ FIELD(CR3_TARGET_COUNT, cr3_target_count),
+ FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
+ FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
+ FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
+ FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
+ FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
+ FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
+ FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
+ FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
+ FIELD(TPR_THRESHOLD, tpr_threshold),
+ FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
+ FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
+ FIELD(VM_EXIT_REASON, vm_exit_reason),
+ FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
+ FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
+ FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
+ FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
+ FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
+ FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
+ FIELD(GUEST_ES_LIMIT, guest_es_limit),
+ FIELD(GUEST_CS_LIMIT, guest_cs_limit),
+ FIELD(GUEST_SS_LIMIT, guest_ss_limit),
+ FIELD(GUEST_DS_LIMIT, guest_ds_limit),
+ FIELD(GUEST_FS_LIMIT, guest_fs_limit),
+ FIELD(GUEST_GS_LIMIT, guest_gs_limit),
+ FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
+ FIELD(GUEST_TR_LIMIT, guest_tr_limit),
+ FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
+ FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
+ FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
+ FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
+ FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
+ FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
+ FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
+ FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
+ FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
+ FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
+ FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
+ FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
+ FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
+ FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
+ FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
+ FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
+ FIELD(CR0_READ_SHADOW, cr0_read_shadow),
+ FIELD(CR4_READ_SHADOW, cr4_read_shadow),
+ FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
+ FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
+ FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
+ FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
+ FIELD(EXIT_QUALIFICATION, exit_qualification),
+ FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
+ FIELD(GUEST_CR0, guest_cr0),
+ FIELD(GUEST_CR3, guest_cr3),
+ FIELD(GUEST_CR4, guest_cr4),
+ FIELD(GUEST_ES_BASE, guest_es_base),
+ FIELD(GUEST_CS_BASE, guest_cs_base),
+ FIELD(GUEST_SS_BASE, guest_ss_base),
+ FIELD(GUEST_DS_BASE, guest_ds_base),
+ FIELD(GUEST_FS_BASE, guest_fs_base),
+ FIELD(GUEST_GS_BASE, guest_gs_base),
+ FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
+ FIELD(GUEST_TR_BASE, guest_tr_base),
+ FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
+ FIELD(GUEST_IDTR_BASE, guest_idtr_base),
+ FIELD(GUEST_DR7, guest_dr7),
+ FIELD(GUEST_RSP, guest_rsp),
+ FIELD(GUEST_RIP, guest_rip),
+ FIELD(GUEST_RFLAGS, guest_rflags),
+ FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
+ FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
+ FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
+ FIELD(HOST_CR0, host_cr0),
+ FIELD(HOST_CR3, host_cr3),
+ FIELD(HOST_CR4, host_cr4),
+ FIELD(HOST_FS_BASE, host_fs_base),
+ FIELD(HOST_GS_BASE, host_gs_base),
+ FIELD(HOST_TR_BASE, host_tr_base),
+ FIELD(HOST_GDTR_BASE, host_gdtr_base),
+ FIELD(HOST_IDTR_BASE, host_idtr_base),
+ FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
+ FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
+ FIELD(HOST_RSP, host_rsp),
+ FIELD(HOST_RIP, host_rip),
+};
+static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
+
+static inline short vmcs_field_to_offset(unsigned long field)
+{
+ if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
+ return -1;
+ return vmcs_field_to_offset_table[field];
+}
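Editor's note: a minimal sketch (a hypothetical helper, not part of this patch) of how the FIELD()/FIELD64() offset table above can be consumed to read an arbitrary vmcs12 field. The width is derived from bits 14:13 of the architectural VMCS field encoding; 64-bit and natural-width fields are both stored as u64 inside struct vmcs12.

    static inline u64 example_vmcs12_read(struct vmcs12 *vmcs12,
                                          unsigned long field)
    {
            short offset = vmcs_field_to_offset(field);
            char *p;

            if (offset < 0)
                    return 0;       /* field not supported by this emulation */

            p = (char *)vmcs12 + offset;

            switch ((field >> 13) & 0x3) {  /* VMCS field width encoding */
            case 0:                         /* 16-bit fields */
                    return *(u16 *)p;
            case 2:                         /* 32-bit fields */
                    return *(u32 *)p;
            default:                        /* 64-bit and natural-width fields */
                    return *(u64 *)p;
            }
    }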
+
+static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.current_vmcs12;
+}
+
+static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
+{
+ struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
+ if (is_error_page(page)) {
+ kvm_release_page_clean(page);
+ return NULL;
+ }
+ return page;
+}
+
+static void nested_release_page(struct page *page)
+{
+ kvm_release_page_dirty(page);
+}
+
+static void nested_release_page_clean(struct page *page)
+{
+ kvm_release_page_clean(page);
+}
+
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
@@ -200,7 +606,11 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
-static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
+/*
+ * We maintain a per-CPU linked list of the VMCSs loaded on that CPU. This is
+ * needed when a CPU is brought down, so that we can VMCLEAR all VMCSs loaded
+ * on it.
+ */
+static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
static unsigned long *vmx_io_bitmap_a;
@@ -442,6 +852,35 @@ static inline bool report_flexpriority(void)
return flexpriority_enabled;
}
+static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
+{
+ return vmcs12->cpu_based_vm_exec_control & bit;
+}
+
+static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
+{
+ return (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
+ (vmcs12->secondary_vm_exec_control & bit);
+}
+
+static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
+ struct kvm_vcpu *vcpu)
+{
+ return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
+}
+
+static inline bool is_exception(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+ == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
+static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
+static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12,
+ u32 reason, unsigned long qualification);
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
@@ -501,6 +940,13 @@ static void vmcs_clear(struct vmcs *vmcs)
vmcs, phys_addr);
}
+static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
+{
+ vmcs_clear(loaded_vmcs->vmcs);
+ loaded_vmcs->cpu = -1;
+ loaded_vmcs->launched = 0;
+}
+
static void vmcs_load(struct vmcs *vmcs)
{
u64 phys_addr = __pa(vmcs);
@@ -510,29 +956,28 @@ static void vmcs_load(struct vmcs *vmcs)
: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
: "cc", "memory");
if (error)
- printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
+ printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
vmcs, phys_addr);
}
-static void __vcpu_clear(void *arg)
+static void __loaded_vmcs_clear(void *arg)
{
- struct vcpu_vmx *vmx = arg;
+ struct loaded_vmcs *loaded_vmcs = arg;
int cpu = raw_smp_processor_id();
- if (vmx->vcpu.cpu == cpu)
- vmcs_clear(vmx->vmcs);
- if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
+ if (loaded_vmcs->cpu != cpu)
+ return; /* vcpu migration can race with cpu offline */
+ if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
- list_del(&vmx->local_vcpus_link);
- vmx->vcpu.cpu = -1;
- vmx->launched = 0;
+ list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
+ loaded_vmcs_init(loaded_vmcs);
}
-static void vcpu_clear(struct vcpu_vmx *vmx)
+static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
- if (vmx->vcpu.cpu == -1)
- return;
- smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
+ if (loaded_vmcs->cpu != -1)
+ smp_call_function_single(
+ loaded_vmcs->cpu, __loaded_vmcs_clear, loaded_vmcs, 1);
}
static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
@@ -585,26 +1030,26 @@ static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
}
}
-static unsigned long vmcs_readl(unsigned long field)
+static __always_inline unsigned long vmcs_readl(unsigned long field)
{
- unsigned long value = 0;
+ unsigned long value;
- asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
- : "+a"(value) : "d"(field) : "cc");
+ asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
+ : "=a"(value) : "d"(field) : "cc");
return value;
}
-static u16 vmcs_read16(unsigned long field)
+static __always_inline u16 vmcs_read16(unsigned long field)
{
return vmcs_readl(field);
}
-static u32 vmcs_read32(unsigned long field)
+static __always_inline u32 vmcs_read32(unsigned long field)
{
return vmcs_readl(field);
}
-static u64 vmcs_read64(unsigned long field)
+static __always_inline u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
return vmcs_readl(field);
@@ -731,6 +1176,15 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
if (vcpu->fpu_active)
eb &= ~(1u << NM_VECTOR);
+
+ /* When we are running a nested L2 guest and L1 specified for it a
+ * certain exception bitmap, we must trap the same exceptions and pass
+ * them to L1. When running L2, we will only handle the exceptions
+ * specified above if L1 did not want them.
+ */
+ if (is_guest_mode(vcpu))
+ eb |= get_vmcs12(vcpu)->exception_bitmap;
+
vmcs_write32(EXCEPTION_BITMAP, eb);
}
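
For illustration, a minimal standalone C sketch of the bitmap merge described above, with hypothetical L0/L1 bitmaps: while L2 runs, a vector is intercepted if either L0's own exception bitmap or L1's vmcs12 bitmap has its bit set.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical bitmaps, one bit per exception vector. */
        uint32_t l0_eb = (1u << 6) | (1u << 14);   /* L0 traps #UD and #PF */
        uint32_t l1_eb = (1u << 1) | (1u << 14);   /* L1 asked for #DB and #PF */
        uint32_t eb = l0_eb | l1_eb;               /* what gets written to EXCEPTION_BITMAP */
        int vec;

        for (vec = 0; vec < 32; vec++)
                if (eb & (1u << vec))
                        printf("vector %d: VM exit to L0 first\n", vec);
        return 0;
}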
@@ -971,22 +1425,22 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!vmm_exclusive)
kvm_cpu_vmxon(phys_addr);
- else if (vcpu->cpu != cpu)
- vcpu_clear(vmx);
+ else if (vmx->loaded_vmcs->cpu != cpu)
+ loaded_vmcs_clear(vmx->loaded_vmcs);
- if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
- per_cpu(current_vmcs, cpu) = vmx->vmcs;
- vmcs_load(vmx->vmcs);
+ if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
+ per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
+ vmcs_load(vmx->loaded_vmcs->vmcs);
}
- if (vcpu->cpu != cpu) {
+ if (vmx->loaded_vmcs->cpu != cpu) {
struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
unsigned long sysenter_esp;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
local_irq_disable();
- list_add(&vmx->local_vcpus_link,
- &per_cpu(vcpus_on_cpu, cpu));
+ list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
+ &per_cpu(loaded_vmcss_on_cpu, cpu));
local_irq_enable();
/*
@@ -998,6 +1452,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+ vmx->loaded_vmcs->cpu = cpu;
}
}
@@ -1005,7 +1460,8 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
__vmx_load_host_state(to_vmx(vcpu));
if (!vmm_exclusive) {
- __vcpu_clear(to_vmx(vcpu));
+ __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
+ vcpu->cpu = -1;
kvm_cpu_vmxoff();
}
}
@@ -1023,19 +1479,55 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_CR0, cr0);
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ if (is_guest_mode(vcpu))
+ vcpu->arch.cr0_guest_owned_bits &=
+ ~get_vmcs12(vcpu)->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
+/*
+ * Return the cr0 value that a nested guest would read. This is a combination
+ * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
+ * its hypervisor (cr0_read_shadow).
+ */
+static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
+{
+ return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
+ (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
+}
+static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
+{
+ return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
+ (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
+}
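
For illustration, a minimal standalone sketch of this combination with hypothetical values: bits that L1 shadows come from the read shadow, everything else comes from the cr0 the CPU actually runs with.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t guest_cr0       = 0x8005003b;  /* real cr0 while L2 runs (TS set for lazy FPU) */
        uint64_t cr0_read_shadow = 0x80050033;  /* what L1 wants L2 to see for shadowed bits */
        uint64_t guest_host_mask = 0x00000008;  /* L1 shadows only CR0.TS (bit 3) */

        uint64_t l2_visible = (guest_cr0 & ~guest_host_mask) |
                              (cr0_read_shadow & guest_host_mask);

        /* Prints 0x80050033: L2 reads TS as clear although the CPU has it set. */
        printf("L2-visible cr0 = %#llx\n", (unsigned long long)l2_visible);
        return 0;
}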
+
static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
+ /* Note that there is no vcpu->fpu_active = 0 here. The caller must
+ * set this *before* calling this function.
+ */
vmx_decache_cr0_guest_bits(vcpu);
vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits = 0;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
- vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
+ if (is_guest_mode(vcpu)) {
+ /*
+ * L1's specified read shadow might not contain the TS bit,
+ * so now that we turned on shadowing of this bit, we need to
+ * set this bit of the shadow. Like in nested_vmx_run we need
+ * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
+ * up-to-date here because we just decached cr0.TS (and we'll
+ * only update vmcs12->guest_cr0 on nested exit).
+ */
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
+ (vcpu->arch.cr0 & X86_CR0_TS);
+ vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
+ } else
+ vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
@@ -1119,6 +1611,25 @@ static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}
+/*
+ * KVM wants to inject page-faults which it got to the guest. This function
+ * checks whether in a nested guest, we need to inject them to L1 or L2.
+ * This function assumes it is called with the exit reason in vmcs02 being
+ * a #PF exception (this is the only case in which KVM injects a #PF when L2
+ * is running).
+ */
+static int nested_pf_handled(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
+ if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
+ return 0;
+
+ nested_vmx_vmexit(vcpu);
+ return 1;
+}
+
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
@@ -1126,6 +1637,10 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info = nr | INTR_INFO_VALID_MASK;
+ if (nr == PF_VECTOR && is_guest_mode(vcpu) &&
+ nested_pf_handled(vcpu))
+ return;
+
if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
@@ -1248,12 +1763,24 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
vmcs_write64(TSC_OFFSET, offset);
+ if (is_guest_mode(vcpu))
+ /*
+ * We're here if L1 chose not to trap the TSC MSR. Since
+ * prepare_vmcs12() does not copy tsc_offset, we need to also
+ * set the vmcs12 field here.
+ */
+ get_vmcs12(vcpu)->tsc_offset = offset -
+ to_vmx(vcpu)->nested.vmcs01_tsc_offset;
}
static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
u64 offset = vmcs_read64(TSC_OFFSET);
vmcs_write64(TSC_OFFSET, offset + adjustment);
+ if (is_guest_mode(vcpu)) {
+ /* Even when running L2, the adjustment needs to apply to L1 */
+ to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
+ }
}
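
For illustration, a minimal arithmetic sketch with hypothetical numbers, assuming (as the comments above imply) that the offset programmed into the hardware while L2 runs is L0's offset for L1 plus L1's offset for L2.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t vmcs01_tsc_offset = -1000000;   /* offset L0 gives L1 */
        int64_t vmcs12_tsc_offset = -50000;     /* offset L1 gives L2 */

        /* While L2 runs, hardware uses the sum of the two offsets. */
        int64_t hw_offset = vmcs01_tsc_offset + vmcs12_tsc_offset;

        /* vmx_write_tsc_offset() receives a new total and recovers the
         * vmcs12 component by subtracting L0's part. */
        int64_t new_total = -1200000;
        int64_t new_vmcs12_offset = new_total - vmcs01_tsc_offset;

        printf("hw offset while L2 runs: %lld\n", (long long)hw_offset);
        printf("vmcs12->tsc_offset becomes: %lld\n", (long long)new_vmcs12_offset);
        return 0;
}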
static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
@@ -1261,6 +1788,236 @@ static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
return target_tsc - native_read_tsc();
}
+static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
+}
+
+/*
+ * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
+ * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
+ * all guests if the "nested" module option is off, and can also be disabled
+ * for a single guest by disabling its VMX cpuid bit.
+ */
+static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
+{
+ return nested && guest_cpuid_has_vmx(vcpu);
+}
+
+/*
+ * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
+ * returned for the various VMX controls MSRs when nested VMX is enabled.
+ * The same values should also be used to verify that vmcs12 control fields are
+ * valid during nested entry from L1 to L2.
+ * Each of these control msrs has a low and high 32-bit half: A low bit is on
+ * if the corresponding bit in the (32-bit) control field *must* be on, and a
+ * bit in the high half is on if the corresponding bit in the control field
+ * may be on. See also vmx_control_verify().
+ * TODO: allow these variables to be modified (downgraded) by module options
+ * or other means.
+ */
+static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
+static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
+static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
+static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
+static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
+static __init void nested_vmx_setup_ctls_msrs(void)
+{
+ /*
+ * Note that as a general rule, the high half of the MSRs (bits in
+ * the control fields which may be 1) should be initialized by the
+ * intersection of the underlying hardware's MSR (i.e., features which
+ * can be supported) and the list of features we want to expose -
+ * because they are known to be properly supported in our code.
+ * Also, usually, the low half of the MSRs (bits which must be 1) can
+ * be set to 0, meaning that L1 may turn off any of these bits. The
+ * reason is that if one of these bits is necessary, it will appear
+ * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
+ * fields of vmcs01 and vmcs02, will turn these bits off - and
+ * nested_vmx_exit_handled() will not pass related exits to L1.
+ * These rules have exceptions below.
+ */
+
+ /* pin-based controls */
+ /*
+ * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
+ * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
+ */
+ nested_vmx_pinbased_ctls_low = 0x16;
+ nested_vmx_pinbased_ctls_high = 0x16 |
+ PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+ PIN_BASED_VIRTUAL_NMIS;
+
+ /* exit controls */
+ nested_vmx_exit_ctls_low = 0;
+ /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
+#ifdef CONFIG_X86_64
+ nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE;
+#else
+ nested_vmx_exit_ctls_high = 0;
+#endif
+
+ /* entry controls */
+ rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
+ nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
+ nested_vmx_entry_ctls_low = 0;
+ nested_vmx_entry_ctls_high &=
+ VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE;
+
+ /* cpu-based controls */
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
+ nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
+ nested_vmx_procbased_ctls_low = 0;
+ nested_vmx_procbased_ctls_high &=
+ CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
+ CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING |
+#ifdef CONFIG_X86_64
+ CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
+#endif
+ CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
+ CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+ /*
+ * We can allow some features even when not supported by the
+ * hardware. For example, L1 can specify an MSR bitmap - and we
+ * can use it to avoid exits to L1 - even when L0 runs L2
+ * without MSR bitmaps.
+ */
+ nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
+
+ /* secondary cpu-based controls */
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+ nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
+ nested_vmx_secondary_ctls_low = 0;
+ nested_vmx_secondary_ctls_high &=
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+}
+
+static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
+{
+ /*
+ * Bits that are 0 in 'high' must be 0 in 'control', and bits that are
+ * 1 in 'low' must be 1 in 'control'.
+ */
+ return ((control & high) | low) == control;
+}
+
+static inline u64 vmx_control_msr(u32 low, u32 high)
+{
+ return low | ((u64)high << 32);
+}
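
For illustration, a minimal standalone sketch of the same check with hypothetical MSR halves: the required-1 bits (low) and the allowed-1 bits (high).

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Same check as vmx_control_verify(): every bit set in 'low' must be set
 * in 'control', and no bit may be set that is clear in 'high'. */
static bool control_ok(uint32_t control, uint32_t low, uint32_t high)
{
        return ((control & high) | low) == control;
}

int main(void)
{
        /* Hypothetical halves: bits 1, 2, 4 required; bits 1, 2, 4, 7 allowed. */
        uint32_t low  = 0x16;
        uint32_t high = 0x16 | (1u << 7);

        printf("%d\n", control_ok(0x16, low, high));              /* 1: exactly the minimum */
        printf("%d\n", control_ok(0x16 | (1u << 7), low, high));  /* 1: optional bit is allowed */
        printf("%d\n", control_ok(0x06, low, high));              /* 0: required bit 4 missing */
        printf("%d\n", control_ok(0x16 | (1u << 9), low, high));  /* 0: bit 9 not allowed */
        return 0;
}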
+
+/*
+ * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
+ * also let it use VMX-specific MSRs.
+ * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
+ * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
+ * like all other MSRs).
+ */
+static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+{
+ if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
+ msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
+ /*
+ * According to the spec, processors which do not support VMX
+ * should throw a #GP(0) when VMX capability MSRs are read.
+ */
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
+
+ switch (msr_index) {
+ case MSR_IA32_FEATURE_CONTROL:
+ *pdata = 0;
+ break;
+ case MSR_IA32_VMX_BASIC:
+ /*
+ * This MSR reports some information about VMX support. We
+ * should return information about the VMX we emulate for the
+ * guest, and the VMCS structure we give it - not about the
+ * VMX support of the underlying hardware.
+ */
+ *pdata = VMCS12_REVISION |
+ ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
+ (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+ break;
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ case MSR_IA32_VMX_PINBASED_CTLS:
+ *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
+ nested_vmx_pinbased_ctls_high);
+ break;
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS:
+ *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
+ nested_vmx_procbased_ctls_high);
+ break;
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ case MSR_IA32_VMX_EXIT_CTLS:
+ *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
+ nested_vmx_exit_ctls_high);
+ break;
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ case MSR_IA32_VMX_ENTRY_CTLS:
+ *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
+ nested_vmx_entry_ctls_high);
+ break;
+ case MSR_IA32_VMX_MISC:
+ *pdata = 0;
+ break;
+ /*
+ * These MSRs specify bits which the guest must keep fixed (on or off)
+ * while L1 is in VMXON mode (in L1's root mode, or running an L2).
+ * We picked the standard core2 setting.
+ */
+#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
+#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
+ case MSR_IA32_VMX_CR0_FIXED0:
+ *pdata = VMXON_CR0_ALWAYSON;
+ break;
+ case MSR_IA32_VMX_CR0_FIXED1:
+ *pdata = -1ULL;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED0:
+ *pdata = VMXON_CR4_ALWAYSON;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED1:
+ *pdata = -1ULL;
+ break;
+ case MSR_IA32_VMX_VMCS_ENUM:
+ *pdata = 0x1f;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
+ nested_vmx_secondary_ctls_high);
+ break;
+ case MSR_IA32_VMX_EPT_VPID_CAP:
+ /* Currently, no nested EPT or nested VPID */
+ *pdata = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+{
+ if (!nested_vmx_allowed(vcpu))
+ return 0;
+
+ if (msr_index == MSR_IA32_FEATURE_CONTROL)
+ /* TODO: the right thing. */
+ return 1;
+ /*
+ * No need to treat VMX capability MSRs specially: If we don't handle
+ * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
+ */
+ return 0;
+}
+
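
For illustration, a sketch of how the IA32_VMX_BASIC value returned above is composed, assuming the SDM layout (VMCS size in bits 44:32, memory type in bits 53:50) and hypothetical revision/size values in place of VMCS12_REVISION and VMCS12_SIZE.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t revision    = 0x12345678;  /* hypothetical stand-in for VMCS12_REVISION */
        uint64_t size        = 4096;        /* hypothetical stand-in for VMCS12_SIZE */
        uint64_t mem_type_wb = 6;           /* write-back memory type */

        /* Low 31 bits: revision; bits 44:32: VMCS size; bits 53:50: memory type. */
        uint64_t vmx_basic = revision | (size << 32) | (mem_type_wb << 50);

        printf("IA32_VMX_BASIC = %#llx\n", (unsigned long long)vmx_basic);
        return 0;
}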
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
@@ -1309,6 +2066,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
/* Otherwise falls through */
default:
vmx_load_host_state(to_vmx(vcpu));
+ if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
+ return 0;
msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
vmx_load_host_state(to_vmx(vcpu));
@@ -1380,6 +2139,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
return 1;
/* Otherwise falls through */
default:
+ if (vmx_set_vmx_msr(vcpu, msr_index, data))
+ break;
msr = find_msr_entry(vmx, msr_index);
if (msr) {
vmx_load_host_state(vmx);
@@ -1469,7 +2230,7 @@ static int hardware_enable(void *garbage)
if (read_cr4() & X86_CR4_VMXE)
return -EBUSY;
- INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
+ INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
test_bits = FEATURE_CONTROL_LOCKED;
@@ -1493,14 +2254,14 @@ static int hardware_enable(void *garbage)
return 0;
}
-static void vmclear_local_vcpus(void)
+static void vmclear_local_loaded_vmcss(void)
{
int cpu = raw_smp_processor_id();
- struct vcpu_vmx *vmx, *n;
+ struct loaded_vmcs *v, *n;
- list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
- local_vcpus_link)
- __vcpu_clear(vmx);
+ list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
+ loaded_vmcss_on_cpu_link)
+ __loaded_vmcs_clear(v);
}
@@ -1515,7 +2276,7 @@ static void kvm_cpu_vmxoff(void)
static void hardware_disable(void *garbage)
{
if (vmm_exclusive) {
- vmclear_local_vcpus();
+ vmclear_local_loaded_vmcss();
kvm_cpu_vmxoff();
}
write_cr4(read_cr4() & ~X86_CR4_VMXE);
@@ -1696,6 +2457,18 @@ static void free_vmcs(struct vmcs *vmcs)
free_pages((unsigned long)vmcs, vmcs_config.order);
}
+/*
+ * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
+ */
+static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
+{
+ if (!loaded_vmcs->vmcs)
+ return;
+ loaded_vmcs_clear(loaded_vmcs);
+ free_vmcs(loaded_vmcs->vmcs);
+ loaded_vmcs->vmcs = NULL;
+}
+
static void free_kvm_area(void)
{
int cpu;
@@ -1756,6 +2529,9 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_ple())
ple_gap = 0;
+ if (nested)
+ nested_vmx_setup_ctls_msrs();
+
return alloc_kvm_area();
}
@@ -2041,7 +2817,7 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
(unsigned long *)&vcpu->arch.regs_dirty);
}
-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
unsigned long cr0,
@@ -2139,11 +2915,23 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vmcs_writel(GUEST_CR3, guest_cr3);
}
-static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+ if (cr4 & X86_CR4_VMXE) {
+ /*
+ * To use VMXON (and later other VMX instructions), a guest
+ * must first be able to turn on cr4.VMXE (see handle_vmon()).
+ * So basically the check on whether to allow nested VMX
+ * is here.
+ */
+ if (!nested_vmx_allowed(vcpu))
+ return 1;
+ } else if (to_vmx(vcpu)->nested.vmxon)
+ return 1;
+
vcpu->arch.cr4 = cr4;
if (enable_ept) {
if (!is_paging(vcpu)) {
@@ -2156,6 +2944,7 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vmcs_writel(CR4_READ_SHADOW, cr4);
vmcs_writel(GUEST_CR4, hw_cr4);
+ return 0;
}
static void vmx_get_segment(struct kvm_vcpu *vcpu,
@@ -2721,18 +3510,110 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
}
/*
+ * Set up the vmcs's constant host-state fields, i.e., host-state fields that
+ * will not change in the lifetime of the guest.
+ * Note that host-state that does change is set elsewhere. E.g., host-state
+ * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
+ */
+static void vmx_set_constant_host_state(void)
+{
+ u32 low32, high32;
+ unsigned long tmpl;
+ struct desc_ptr dt;
+
+ vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
+ vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
+
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
+
+ native_store_idt(&dt);
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
+
+ asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
+ vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
+
+ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
+ vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+ rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
+ vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
+
+ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
+ rdmsr(MSR_IA32_CR_PAT, low32, high32);
+ vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
+ }
+}
+
+static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
+{
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+ if (enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+ if (is_guest_mode(&vmx->vcpu))
+ vmx->vcpu.arch.cr4_guest_owned_bits &=
+ ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
+ vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
+}
+
+static u32 vmx_exec_control(struct vcpu_vmx *vmx)
+{
+ u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
+ if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_STORE_EXITING |
+ CPU_BASED_CR8_LOAD_EXITING;
+#endif
+ }
+ if (!enable_ept)
+ exec_control |= CPU_BASED_CR3_STORE_EXITING |
+ CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_INVLPG_EXITING;
+ return exec_control;
+}
+
+static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
+{
+ u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
+ if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
+ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ if (vmx->vpid == 0)
+ exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
+ if (!enable_ept) {
+ exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+ enable_unrestricted_guest = 0;
+ }
+ if (!enable_unrestricted_guest)
+ exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ if (!ple_gap)
+ exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+ return exec_control;
+}
+
+static void ept_set_mmio_spte_mask(void)
+{
+ /*
+ * EPT Misconfigurations can be generated if the value of bits 2:0
+ * of an EPT paging-structure entry is 110b (write/execute).
+ * Also, magic bits (0xffull << 49) is set to quickly identify mmio
+ * spte.
+ */
+ kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull);
+}
+
+/*
* Sets up the vmcs for emulated real mode.
*/
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
- u32 host_sysenter_cs, msr_low, msr_high;
- u32 junk;
- u64 host_pat;
+#ifdef CONFIG_X86_64
unsigned long a;
- struct desc_ptr dt;
+#endif
int i;
- unsigned long kvm_vmx_return;
- u32 exec_control;
/* I/O */
vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
@@ -2747,36 +3628,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
vmcs_config.pin_based_exec_ctrl);
- exec_control = vmcs_config.cpu_based_exec_ctrl;
- if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
- exec_control &= ~CPU_BASED_TPR_SHADOW;
-#ifdef CONFIG_X86_64
- exec_control |= CPU_BASED_CR8_STORE_EXITING |
- CPU_BASED_CR8_LOAD_EXITING;
-#endif
- }
- if (!enable_ept)
- exec_control |= CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_INVLPG_EXITING;
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
if (cpu_has_secondary_exec_ctrls()) {
- exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
- if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
- exec_control &=
- ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
- if (vmx->vpid == 0)
- exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
- if (!enable_ept) {
- exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
- enable_unrestricted_guest = 0;
- }
- if (!enable_unrestricted_guest)
- exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
- if (!ple_gap)
- exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ vmx_secondary_exec_control(vmx));
}
if (ple_gap) {
@@ -2784,20 +3640,13 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(PLE_WINDOW, ple_window);
}
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
- vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
- vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
- vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
-
- vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
- vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
- vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
- vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmx_set_constant_host_state();
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
@@ -2808,32 +3657,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif
- vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
-
- native_store_idt(&dt);
- vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
-
- asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
- rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
- vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
- rdmsrl(MSR_IA32_SYSENTER_ESP, a);
- vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
- rdmsrl(MSR_IA32_SYSENTER_EIP, a);
- vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
-
- if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
- rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
- host_pat = msr_low | ((u64) msr_high << 32);
- vmcs_write64(HOST_IA32_PAT, host_pat);
- }
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+ u32 msr_low, msr_high;
+ u64 host_pat;
rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
host_pat = msr_low | ((u64) msr_high << 32);
/* Write the default value follow host pat */
@@ -2863,10 +3695,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
- vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
- if (enable_ept)
- vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
- vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
+ set_cr4_guest_host_mask(vmx);
kvm_write_tsc(&vmx->vcpu, 0);
@@ -2990,9 +3819,25 @@ out:
return ret;
}
+/*
+ * In nested virtualization, check if L1 asked to exit on external interrupts.
+ * For most existing hypervisors, this will always return true.
+ */
+static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
+{
+ return get_vmcs12(vcpu)->pin_based_vm_exec_control &
+ PIN_BASED_EXT_INTR_MASK;
+}
+
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ /* We can get here when nested_run_pending caused
+ * vmx_interrupt_allowed() to return false. In this case, do
+ * nothing - the interrupt will be injected later.
+ */
+ return;
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -3049,6 +3894,9 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (is_guest_mode(vcpu))
+ return;
+
if (!cpu_has_virtual_nmis()) {
/*
* Tracking the NMI-blocked state in software is built upon
@@ -3115,6 +3963,17 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
+ if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+ struct vmcs12 *vmcs12;
+ if (to_vmx(vcpu)->nested.nested_run_pending)
+ return 0;
+ nested_vmx_vmexit(vcpu);
+ vmcs12 = get_vmcs12(vcpu);
+ vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
+ vmcs12->vm_exit_intr_info = 0;
+ /* fall through to normal code, but now in L1, not L2 */
+ }
+
return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
@@ -3356,6 +4215,58 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
hypercall[2] = 0xc1;
}
+/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
+static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ if (to_vmx(vcpu)->nested.vmxon &&
+ ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+ return 1;
+
+ if (is_guest_mode(vcpu)) {
+ /*
+ * We get here when L2 changed cr0 in a way that did not change
+ * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
+ * but did change L0 shadowed bits. This can currently happen
+ * with the TS bit: L0 may want to leave TS on (for lazy fpu
+ * loading) while pretending to allow the guest to change it.
+ */
+ if (kvm_set_cr0(vcpu, (val & vcpu->arch.cr0_guest_owned_bits) |
+ (vcpu->arch.cr0 & ~vcpu->arch.cr0_guest_owned_bits)))
+ return 1;
+ vmcs_writel(CR0_READ_SHADOW, val);
+ return 0;
+ } else
+ return kvm_set_cr0(vcpu, val);
+}
+
+static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ if (is_guest_mode(vcpu)) {
+ if (kvm_set_cr4(vcpu, (val & vcpu->arch.cr4_guest_owned_bits) |
+ (vcpu->arch.cr4 & ~vcpu->arch.cr4_guest_owned_bits)))
+ return 1;
+ vmcs_writel(CR4_READ_SHADOW, val);
+ return 0;
+ } else
+ return kvm_set_cr4(vcpu, val);
+}
+
+/* called to set cr0 as appropriate for a clts instruction exit. */
+static void handle_clts(struct kvm_vcpu *vcpu)
+{
+ if (is_guest_mode(vcpu)) {
+ /*
+ * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
+ * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
+ * just pretend it's off (also in arch.cr0 for fpu_activate).
+ */
+ vmcs_writel(CR0_READ_SHADOW,
+ vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
+ vcpu->arch.cr0 &= ~X86_CR0_TS;
+ } else
+ vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+}
+
static int handle_cr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification, val;
@@ -3372,7 +4283,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
- err = kvm_set_cr0(vcpu, val);
+ err = handle_set_cr0(vcpu, val);
kvm_complete_insn_gp(vcpu, err);
return 1;
case 3:
@@ -3380,7 +4291,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
kvm_complete_insn_gp(vcpu, err);
return 1;
case 4:
- err = kvm_set_cr4(vcpu, val);
+ err = handle_set_cr4(vcpu, val);
kvm_complete_insn_gp(vcpu, err);
return 1;
case 8: {
@@ -3398,7 +4309,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
};
break;
case 2: /* clts */
- vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+ handle_clts(vcpu);
trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
skip_emulated_instruction(vcpu);
vmx_fpu_activate(vcpu);
@@ -3574,12 +4485,6 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
return 1;
}
-static int handle_vmx_insn(struct kvm_vcpu *vcpu)
-{
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
-}
-
static int handle_invd(struct kvm_vcpu *vcpu)
{
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
@@ -3777,11 +4682,19 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
u64 sptes[4];
- int nr_sptes, i;
+ int nr_sptes, i, ret;
gpa_t gpa;
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ ret = handle_mmio_page_fault_common(vcpu, gpa, true);
+ if (likely(ret == 1))
+ return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
+ EMULATE_DONE;
+ if (unlikely(!ret))
+ return 1;
+
+ /* It is the real ept misconfig */
printk(KERN_ERR "EPT: Misconfiguration.\n");
printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
@@ -3866,6 +4779,639 @@ static int handle_invalid_op(struct kvm_vcpu *vcpu)
}
/*
+ * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
+ * We could reuse a single VMCS for all the L2 guests, but we also want the
+ * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
+ * allows keeping them loaded on the processor, and in the future will allow
+ * optimizations where prepare_vmcs02 doesn't need to set all the fields on
+ * every entry if they never change.
+ * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
+ * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
+ *
+ * The following functions allocate and free a vmcs02 in this pool.
+ */
+
+/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
+static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
+{
+ struct vmcs02_list *item;
+ list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
+ if (item->vmptr == vmx->nested.current_vmptr) {
+ list_move(&item->list, &vmx->nested.vmcs02_pool);
+ return &item->vmcs02;
+ }
+
+ if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
+ /* Recycle the least recently used VMCS. */
+ item = list_entry(vmx->nested.vmcs02_pool.prev,
+ struct vmcs02_list, list);
+ item->vmptr = vmx->nested.current_vmptr;
+ list_move(&item->list, &vmx->nested.vmcs02_pool);
+ return &item->vmcs02;
+ }
+
+ /* Create a new VMCS */
+ item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
+ if (!item)
+ return NULL;
+ item->vmcs02.vmcs = alloc_vmcs();
+ if (!item->vmcs02.vmcs) {
+ kfree(item);
+ return NULL;
+ }
+ loaded_vmcs_init(&item->vmcs02);
+ item->vmptr = vmx->nested.current_vmptr;
+ list_add(&(item->list), &(vmx->nested.vmcs02_pool));
+ vmx->nested.vmcs02_num++;
+ return &item->vmcs02;
+}
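
For illustration, a minimal userspace sketch of the same most-recently-used-first policy, keyed by vmptr; the names, the pool size and the array representation are hypothetical simplifications of the list-based pool above.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define POOL_SIZE 4

static uint64_t pool[POOL_SIZE];  /* cached vmptrs, most recent first */
static int pool_num;

/* Returns 1 on a cache hit (reuse a vmcs02), 0 on a miss (allocate/recycle). */
static int pool_get(uint64_t vmptr)
{
        int i;

        for (i = 0; i < pool_num; i++) {
                if (pool[i] == vmptr) {
                        /* hit: move to the front, like list_move() above */
                        memmove(&pool[1], &pool[0], i * sizeof(pool[0]));
                        pool[0] = vmptr;
                        return 1;
                }
        }
        if (pool_num == POOL_SIZE) {
                /* full: recycle the least recently used slot */
                memmove(&pool[1], &pool[0], (POOL_SIZE - 1) * sizeof(pool[0]));
        } else {
                memmove(&pool[1], &pool[0], pool_num * sizeof(pool[0]));
                pool_num++;
        }
        pool[0] = vmptr;
        return 0;
}

int main(void)
{
        uint64_t ptrs[] = { 0x1000, 0x2000, 0x1000, 0x3000 };
        unsigned i;

        for (i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); i++)
                printf("vmptr %#llx: %s\n", (unsigned long long)ptrs[i],
                       pool_get(ptrs[i]) ? "reuse cached vmcs02" : "allocate new vmcs02");
        return 0;
}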
+
+/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
+static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
+{
+ struct vmcs02_list *item;
+ list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
+ if (item->vmptr == vmptr) {
+ free_loaded_vmcs(&item->vmcs02);
+ list_del(&item->list);
+ kfree(item);
+ vmx->nested.vmcs02_num--;
+ return;
+ }
+}
+
+/*
+ * Free all VMCSs saved for this vcpu, except the one pointed by
+ * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
+ * currently used, if running L2), and vmcs01 when running L2.
+ */
+static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
+{
+ struct vmcs02_list *item, *n;
+ list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
+ if (vmx->loaded_vmcs != &item->vmcs02)
+ free_loaded_vmcs(&item->vmcs02);
+ list_del(&item->list);
+ kfree(item);
+ }
+ vmx->nested.vmcs02_num = 0;
+
+ if (vmx->loaded_vmcs != &vmx->vmcs01)
+ free_loaded_vmcs(&vmx->vmcs01);
+}
+
+/*
+ * Emulate the VMXON instruction.
+ * Currently, we just remember that VMX is active, and do not save or even
+ * inspect the argument to VMXON (the so-called "VMXON pointer") because we
+ * do not currently need to store anything in that guest-allocated memory
+ * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
+ * argument is different from the VMXON pointer (which the spec says they should).
+ */
+static int handle_vmon(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment cs;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /* The Intel VMX Instruction Reference lists a bunch of bits that
+ * are prerequisite to running VMXON, most notably cr4.VMXE must be
+ * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
+ * Otherwise, we should fail with #UD. We test these now:
+ */
+ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
+ !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
+ (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+ if (is_long_mode(vcpu) && !cs.l) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (vmx_get_cpl(vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
+ vmx->nested.vmcs02_num = 0;
+
+ vmx->nested.vmxon = true;
+
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/*
+ * Intel's VMX Instruction Reference specifies a common set of prerequisites
+ * for running VMX instructions (except VMXON, whose prerequisites are
+ * slightly different). It also specifies what exception to inject otherwise.
+ */
+static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment cs;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx->nested.vmxon) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
+ vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+ if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
+ (is_long_mode(vcpu) && !cs.l)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
+ if (vmx_get_cpl(vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Free whatever needs to be freed from vmx->nested when L1 goes down, or
+ * just stops using VMX.
+ */
+static void free_nested(struct vcpu_vmx *vmx)
+{
+ if (!vmx->nested.vmxon)
+ return;
+ vmx->nested.vmxon = false;
+ if (vmx->nested.current_vmptr != -1ull) {
+ kunmap(vmx->nested.current_vmcs12_page);
+ nested_release_page(vmx->nested.current_vmcs12_page);
+ vmx->nested.current_vmptr = -1ull;
+ vmx->nested.current_vmcs12 = NULL;
+ }
+ /* Unpin physical memory we referred to in current vmcs02 */
+ if (vmx->nested.apic_access_page) {
+ nested_release_page(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page = NULL;
+ }
+
+ nested_free_all_saved_vmcss(vmx);
+}
+
+/* Emulate the VMXOFF instruction */
+static int handle_vmoff(struct kvm_vcpu *vcpu)
+{
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+ free_nested(to_vmx(vcpu));
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/*
+ * Decode the memory-address operand of a vmx instruction, as recorded on an
+ * exit caused by such an instruction (run by a guest hypervisor).
+ * On success, returns 0. When the operand is invalid, returns 1 and throws
+ * #UD or #GP.
+ */
+static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
+ unsigned long exit_qualification,
+ u32 vmx_instruction_info, gva_t *ret)
+{
+ /*
+ * According to Vol. 3B, "Information for VM Exits Due to Instruction
+ * Execution", on an exit, vmx_instruction_info holds most of the
+ * addressing components of the operand. Only the displacement part
+ * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
+ * For how an actual address is calculated from all these components,
+ * refer to Vol. 1, "Operand Addressing".
+ */
+ int scaling = vmx_instruction_info & 3;
+ int addr_size = (vmx_instruction_info >> 7) & 7;
+ bool is_reg = vmx_instruction_info & (1u << 10);
+ int seg_reg = (vmx_instruction_info >> 15) & 7;
+ int index_reg = (vmx_instruction_info >> 18) & 0xf;
+ bool index_is_valid = !(vmx_instruction_info & (1u << 22));
+ int base_reg = (vmx_instruction_info >> 23) & 0xf;
+ bool base_is_valid = !(vmx_instruction_info & (1u << 27));
+
+ if (is_reg) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ /* Addr = segment_base + offset */
+ /* offset = base + [index * scale] + displacement */
+ *ret = vmx_get_segment_base(vcpu, seg_reg);
+ if (base_is_valid)
+ *ret += kvm_register_read(vcpu, base_reg);
+ if (index_is_valid)
+ *ret += kvm_register_read(vcpu, index_reg) << scaling;
+ *ret += exit_qualification; /* holds the displacement */
+
+ if (addr_size == 1) /* 32 bit */
+ *ret &= 0xffffffff;
+
+ /*
+ * TODO: throw #GP (and return 1) in various cases that the VM*
+ * instructions require it - e.g., offset beyond segment limit,
+ * unusable or unreadable/unwritable segment, non-canonical 64-bit
+ * address, and so on. Currently these are not checked.
+ */
+ return 0;
+}
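
For illustration, a standalone sketch that pulls apart a hypothetical VMX-instruction-information word with the same shifts and masks used above.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main(void)
{
        /* Hypothetical encoding: no scaling, 64-bit address size, memory
         * operand, DS segment, no index register, base register 0 (RAX). */
        uint32_t info = (2u << 7)    /* address size: 2 = 64 bit */
                      | (3u << 15)   /* segment register: 3 = DS */
                      | (1u << 22)   /* index register invalid */
                      | (0u << 23);  /* base register = reg 0 */

        int  scaling        = info & 3;
        int  addr_size      = (info >> 7) & 7;
        bool is_reg         = info & (1u << 10);
        int  seg_reg        = (info >> 15) & 7;
        int  index_reg      = (info >> 18) & 0xf;
        bool index_is_valid = !(info & (1u << 22));
        int  base_reg       = (info >> 23) & 0xf;
        bool base_is_valid  = !(info & (1u << 27));

        printf("scaling=%d addr_size=%d is_reg=%d seg=%d\n",
               scaling, addr_size, is_reg, seg_reg);
        printf("index_reg=%d (valid=%d) base_reg=%d (valid=%d)\n",
               index_reg, index_is_valid, base_reg, base_is_valid);
        return 0;
}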
+
+/*
+ * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
+ * set the success or error code of an emulated VMX instruction, as specified
+ * by Vol 2B, VMX Instruction Reference, "Conventions".
+ */
+static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
+{
+ vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
+}
+
+static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
+{
+ vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF))
+ | X86_EFLAGS_CF);
+}
+
+static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
+ u32 vm_instruction_error)
+{
+ if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
+ /*
+ * failValid writes the error number to the current VMCS, which
+ * can't be done when there isn't a current VMCS.
+ */
+ nested_vmx_failInvalid(vcpu);
+ return;
+ }
+ vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF))
+ | X86_EFLAGS_ZF);
+ get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
+}
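
For illustration, a sketch of the three outcomes applied to a hypothetical RFLAGS value: VMsucceed clears all six arithmetic flags, VMfailInvalid leaves only CF set, and VMfailValid leaves only ZF set.

#include <stdio.h>
#include <stdint.h>

#define FL_CF 0x0001
#define FL_PF 0x0004
#define FL_AF 0x0010
#define FL_ZF 0x0040
#define FL_SF 0x0080
#define FL_OF 0x0800

#define FL_ARITH (FL_CF | FL_PF | FL_AF | FL_ZF | FL_SF | FL_OF)

int main(void)
{
        uint64_t rflags = 0x2 | FL_ZF | FL_CF;   /* hypothetical starting value */

        uint64_t succeed      = rflags & ~FL_ARITH;            /* VMsucceed */
        uint64_t fail_invalid = (rflags & ~FL_ARITH) | FL_CF;  /* VMfailInvalid */
        uint64_t fail_valid   = (rflags & ~FL_ARITH) | FL_ZF;  /* VMfailValid */

        printf("succeed=%#llx failInvalid=%#llx failValid=%#llx\n",
               (unsigned long long)succeed,
               (unsigned long long)fail_invalid,
               (unsigned long long)fail_valid);
        return 0;
}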
+
+/* Emulate the VMCLEAR instruction */
+static int handle_vmclear(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gva_t gva;
+ gpa_t vmptr;
+ struct vmcs12 *vmcs12;
+ struct page *page;
+ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+ sizeof(vmptr), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
+ nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ if (vmptr == vmx->nested.current_vmptr) {
+ kunmap(vmx->nested.current_vmcs12_page);
+ nested_release_page(vmx->nested.current_vmcs12_page);
+ vmx->nested.current_vmptr = -1ull;
+ vmx->nested.current_vmcs12 = NULL;
+ }
+
+ page = nested_get_page(vcpu, vmptr);
+ if (page == NULL) {
+ /*
+ * For accurate processor emulation, VMCLEAR beyond available
+ * physical memory should do nothing at all. However, it is
+ * possible that a nested vmx bug, not a guest hypervisor bug,
+ * resulted in this case, so let's shut down before doing any
+ * more damage:
+ */
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ return 1;
+ }
+ vmcs12 = kmap(page);
+ vmcs12->launch_state = 0;
+ kunmap(page);
+ nested_release_page(page);
+
+ nested_free_vmcs02(vmx, vmptr);
+
+ skip_emulated_instruction(vcpu);
+ nested_vmx_succeed(vcpu);
+ return 1;
+}
+
+static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
+
+/* Emulate the VMLAUNCH instruction */
+static int handle_vmlaunch(struct kvm_vcpu *vcpu)
+{
+ return nested_vmx_run(vcpu, true);
+}
+
+/* Emulate the VMRESUME instruction */
+static int handle_vmresume(struct kvm_vcpu *vcpu)
+{
+ return nested_vmx_run(vcpu, false);
+}
+
+enum vmcs_field_type {
+ VMCS_FIELD_TYPE_U16 = 0,
+ VMCS_FIELD_TYPE_U64 = 1,
+ VMCS_FIELD_TYPE_U32 = 2,
+ VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
+};
+
+static inline int vmcs_field_type(unsigned long field)
+{
+ if (0x1 & field) /* the *_HIGH fields are all 32 bit */
+ return VMCS_FIELD_TYPE_U32;
+ return (field >> 13) & 0x3;
+}
+
+static inline int vmcs_field_readonly(unsigned long field)
+{
+ return (((field >> 10) & 0x3) == 1);
+}
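
For illustration, a standalone sketch decoding a few field encodings with the bit layout assumed above (bit 0 selects the high half of a 64-bit field, bits 11:10 mark read-only fields, bits 14:13 give the width); the example encodings are hypothetical.

#include <stdio.h>

static const char *width_name[] = { "u16", "u64", "u32", "natural" };

static void decode(unsigned long field)
{
        int high_half = field & 1;          /* odd encodings are *_HIGH halves */
        int kind      = (field >> 10) & 3;  /* 1 == read-only VM-exit info */
        int width     = (field >> 13) & 3;

        printf("field %#06lx: %s%s\n", field,
               high_half ? "u32 (high half of a u64 field)" : width_name[width],
               kind == 1 ? ", read-only" : "");
}

int main(void)
{
        decode(0x00000800);  /* 16-bit field */
        decode(0x00002802);  /* 64-bit field */
        decode(0x00002803);  /* high half of the same */
        decode(0x00004402);  /* 32-bit read-only field */
        return 0;
}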
+
+/*
+ * Read a vmcs12 field. Since these can have varying lengths and we return
+ * one type, we chose the biggest type (u64) and zero-extend the return value
+ * to that size. Note that the caller, handle_vmread, might need to use only
+ * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
+ * 64-bit fields are to be returned).
+ */
+static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 *ret)
+{
+ short offset = vmcs_field_to_offset(field);
+ char *p;
+
+ if (offset < 0)
+ return 0;
+
+ p = ((char *)(get_vmcs12(vcpu))) + offset;
+
+ switch (vmcs_field_type(field)) {
+ case VMCS_FIELD_TYPE_NATURAL_WIDTH:
+ *ret = *((natural_width *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U16:
+ *ret = *((u16 *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U32:
+ *ret = *((u32 *)p);
+ return 1;
+ case VMCS_FIELD_TYPE_U64:
+ *ret = *((u64 *)p);
+ return 1;
+ default:
+ return 0; /* can never happen. */
+ }
+}
+
+/*
+ * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
+ * used before) all generate the same failure when it is missing.
+ */
+static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (vmx->nested.current_vmptr == -1ull) {
+ nested_vmx_failInvalid(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 0;
+ }
+ return 1;
+}
+
+static int handle_vmread(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ u64 field_value;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gva_t gva = 0;
+
+ if (!nested_vmx_check_permission(vcpu) ||
+ !nested_vmx_check_vmcs12(vcpu))
+ return 1;
+
+ /* Decode instruction info and find the field to read */
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ /* Read the field, zero-extended to a u64 field_value */
+ if (!vmcs12_read_any(vcpu, field, &field_value)) {
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ /*
+ * Now copy part of this value to register or memory, as requested.
+ * Note that the number of bits actually copied is 32 or 64 depending
+ * on the guest's mode (32 or 64 bit), not on the given field's length.
+ */
+ if (vmx_instruction_info & (1u << 10)) {
+ kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+ field_value);
+ } else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &gva))
+ return 1;
+ /* _system ok, as nested_vmx_check_permission verified cpl=0 */
+ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+ &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+static int handle_vmwrite(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ gva_t gva;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ char *p;
+ short offset;
+ /* The value to write might be 32 or 64 bits, depending on L1's long
+ * mode, and eventually we need to write that into a field of several
+ * possible lengths. The code below first zero-extends the value to 64
+ * bit (field_value), and then copies only the appropriate number of
+ * bits into the vmcs12 field.
+ */
+ u64 field_value = 0;
+ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu) ||
+ !nested_vmx_check_vmcs12(vcpu))
+ return 1;
+
+ if (vmx_instruction_info & (1u << 10))
+ field_value = kvm_register_read(vcpu,
+ (((vmx_instruction_info) >> 3) & 0xf));
+ else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &gva))
+ return 1;
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
+ &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+ }
+
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ if (vmcs_field_readonly(field)) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ offset = vmcs_field_to_offset(field);
+ if (offset < 0) {
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ p = ((char *) get_vmcs12(vcpu)) + offset;
+
+ switch (vmcs_field_type(field)) {
+ case VMCS_FIELD_TYPE_U16:
+ *(u16 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_U32:
+ *(u32 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_U64:
+ *(u64 *)p = field_value;
+ break;
+ case VMCS_FIELD_TYPE_NATURAL_WIDTH:
+ *(natural_width *)p = field_value;
+ break;
+ default:
+ nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/* Emulate the VMPTRLD instruction */
+static int handle_vmptrld(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gva_t gva;
+ gpa_t vmptr;
+ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+ sizeof(vmptr), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
+ nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ if (vmx->nested.current_vmptr != vmptr) {
+ struct vmcs12 *new_vmcs12;
+ struct page *page;
+ page = nested_get_page(vcpu, vmptr);
+ if (page == NULL) {
+ nested_vmx_failInvalid(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ new_vmcs12 = kmap(page);
+ if (new_vmcs12->revision_id != VMCS12_REVISION) {
+ kunmap(page);
+ nested_release_page_clean(page);
+ nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ if (vmx->nested.current_vmptr != -1ull) {
+ kunmap(vmx->nested.current_vmcs12_page);
+ nested_release_page(vmx->nested.current_vmcs12_page);
+ }
+
+ vmx->nested.current_vmptr = vmptr;
+ vmx->nested.current_vmcs12 = new_vmcs12;
+ vmx->nested.current_vmcs12_page = page;
+ }
+
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/* Emulate the VMPTRST instruction */
+static int handle_vmptrst(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gva_t vmcs_gva;
+ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, &vmcs_gva))
+ return 1;
+ /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+ if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+ (void *)&to_vmx(vcpu)->nested.current_vmptr,
+ sizeof(u64), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+ nested_vmx_succeed(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
+/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
* to be done to userspace and return 0.
@@ -3886,15 +5432,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_VMCALL] = handle_vmcall,
- [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
- [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
- [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
- [EXIT_REASON_VMPTRST] = handle_vmx_insn,
- [EXIT_REASON_VMREAD] = handle_vmx_insn,
- [EXIT_REASON_VMRESUME] = handle_vmx_insn,
- [EXIT_REASON_VMWRITE] = handle_vmx_insn,
- [EXIT_REASON_VMOFF] = handle_vmx_insn,
- [EXIT_REASON_VMON] = handle_vmx_insn,
+ [EXIT_REASON_VMCLEAR] = handle_vmclear,
+ [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
+ [EXIT_REASON_VMPTRLD] = handle_vmptrld,
+ [EXIT_REASON_VMPTRST] = handle_vmptrst,
+ [EXIT_REASON_VMREAD] = handle_vmread,
+ [EXIT_REASON_VMRESUME] = handle_vmresume,
+ [EXIT_REASON_VMWRITE] = handle_vmwrite,
+ [EXIT_REASON_VMOFF] = handle_vmoff,
+ [EXIT_REASON_VMON] = handle_vmon,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_WBINVD] = handle_wbinvd,
@@ -3911,6 +5457,229 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
+ * rather than handle it ourselves in L0. I.e., check whether L1 wanted to
+ * intercept the current event (a read or write of a specific MSR), as
+ * expressed through its MSR bitmap. This may be the case even when L0
+ * doesn't use MSR bitmaps itself.
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12, u32 exit_reason)
+{
+ u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+ gpa_t bitmap;
+
+ if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS))
+ return 1;
+
+ /*
+ * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
+ * for the four combinations of read/write and low/high MSR numbers.
+ * First we need to figure out which of the four to use:
+ */
+ bitmap = vmcs12->msr_bitmap;
+ if (exit_reason == EXIT_REASON_MSR_WRITE)
+ bitmap += 2048;
+ if (msr_index >= 0xc0000000) {
+ msr_index -= 0xc0000000;
+ bitmap += 1024;
+ }
+
+ /* Then read the msr_index'th bit from this bitmap: */
+ if (msr_index < 1024*8) {
+ unsigned char b;
+ kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1);
+ return 1 & (b >> (msr_index & 7));
+ } else
+ return 1; /* let L1 handle the wrong parameter */
+}
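
For illustration, a standalone sketch that locates the intercept bit for a given MSR within the 4-KB bitmap layout described above (read-low, read-high, write-low, write-high quarters of 1024 bytes each).

#include <stdio.h>
#include <stdint.h>

static void locate(uint32_t msr, int is_write)
{
        uint32_t offset = 0;

        if (is_write)
                offset += 2048;          /* write bitmaps are the upper half */
        if (msr >= 0xc0000000) {
                msr -= 0xc0000000;       /* "high" MSR range */
                offset += 1024;
        }
        printf("byte %u, bit %u\n", offset + msr / 8, msr % 8);
}

int main(void)
{
        locate(0x00000174, 0);  /* e.g. IA32_SYSENTER_CS, read */
        locate(0xc0000080, 1);  /* e.g. IA32_EFER, write */
        return 0;
}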
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0. I.e., check if L1 wanted to
+ * intercept (via guest_host_mask etc.) the current event.
+ */
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ int cr = exit_qualification & 15;
+ int reg = (exit_qualification >> 8) & 15;
+ unsigned long val = kvm_register_read(vcpu, reg);
+
+ switch ((exit_qualification >> 4) & 3) {
+ case 0: /* mov to cr */
+ switch (cr) {
+ case 0:
+ if (vmcs12->cr0_guest_host_mask &
+ (val ^ vmcs12->cr0_read_shadow))
+ return 1;
+ break;
+ case 3:
+ if ((vmcs12->cr3_target_count >= 1 &&
+ vmcs12->cr3_target_value0 == val) ||
+ (vmcs12->cr3_target_count >= 2 &&
+ vmcs12->cr3_target_value1 == val) ||
+ (vmcs12->cr3_target_count >= 3 &&
+ vmcs12->cr3_target_value2 == val) ||
+ (vmcs12->cr3_target_count >= 4 &&
+ vmcs12->cr3_target_value3 == val))
+ return 0;
+ if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
+ return 1;
+ break;
+ case 4:
+ if (vmcs12->cr4_guest_host_mask &
+ (vmcs12->cr4_read_shadow ^ val))
+ return 1;
+ break;
+ case 8:
+ if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
+ return 1;
+ break;
+ }
+ break;
+ case 2: /* clts */
+ if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
+ (vmcs12->cr0_read_shadow & X86_CR0_TS))
+ return 1;
+ break;
+ case 1: /* mov from cr */
+ switch (cr) {
+ case 3:
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR3_STORE_EXITING)
+ return 1;
+ break;
+ case 8:
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR8_STORE_EXITING)
+ return 1;
+ break;
+ }
+ break;
+ case 3: /* lmsw */
+ /*
+ * lmsw can change bits 1..3 of cr0, and only set bit 0 of
+ * cr0. Other attempted changes are ignored, with no exit.
+ */
+ if (vmcs12->cr0_guest_host_mask & 0xe &
+ (val ^ vmcs12->cr0_read_shadow))
+ return 1;
+ if ((vmcs12->cr0_guest_host_mask & 0x1) &&
+ !(vmcs12->cr0_read_shadow & 0x1) &&
+ (val & 0x1))
+ return 1;
+ break;
+ }
+ return 0;
+}
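
For illustration, a standalone sketch of the lmsw decision above with hypothetical mask/shadow values: an exit to L1 is needed when a shadowed bit among MP/EM/TS would change, or when PE is shadowed, clear in the read shadow, and the new value would set it.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Same test nested_vmx_exit_handled_cr() applies for lmsw. */
static bool lmsw_exits_to_l1(uint16_t val, unsigned long mask,
                             unsigned long shadow)
{
        if (mask & 0xe & (val ^ shadow))
                return true;             /* a shadowed MP/EM/TS bit would change */
        if ((mask & 0x1) && !(shadow & 0x1) && (val & 0x1))
                return true;             /* PE would be set */
        return false;
}

int main(void)
{
        /* Hypothetical case: L1 shadows TS (bit 3), read shadow has it clear. */
        printf("%d\n", lmsw_exits_to_l1(0x8, 0x8, 0x0));  /* 1: sets shadowed TS */
        printf("%d\n", lmsw_exits_to_l1(0x2, 0x8, 0x0));  /* 0: TS unchanged */
        return 0;
}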
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * should handle it ourselves in L0 (and then continue L2). Only call this
+ * when in is_guest_mode (L2).
+ */
+static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+{
+ u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
+ u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (vmx->nested.nested_run_pending)
+ return 0;
+
+ if (unlikely(vmx->fail)) {
+ printk(KERN_INFO "%s failed vm entry %x\n",
+ __func__, vmcs_read32(VM_INSTRUCTION_ERROR));
+ return 1;
+ }
+
+ switch (exit_reason) {
+ case EXIT_REASON_EXCEPTION_NMI:
+ if (!is_exception(intr_info))
+ return 0;
+ else if (is_page_fault(intr_info))
+ return enable_ept;
+ return vmcs12->exception_bitmap &
+ (1u << (intr_info & INTR_INFO_VECTOR_MASK));
+ case EXIT_REASON_EXTERNAL_INTERRUPT:
+ return 0;
+ case EXIT_REASON_TRIPLE_FAULT:
+ return 1;
+ case EXIT_REASON_PENDING_INTERRUPT:
+ case EXIT_REASON_NMI_WINDOW:
+ /*
+ * prepare_vmcs02() set the CPU_BASED_VIRTUAL_INTR_PENDING bit
+ * (aka Interrupt Window Exiting) only when L1 turned it on,
+ * so if we got a PENDING_INTERRUPT exit, this must be for L1.
+ * Same for NMI Window Exiting.
+ */
+ return 1;
+ case EXIT_REASON_TASK_SWITCH:
+ return 1;
+ case EXIT_REASON_CPUID:
+ return 1;
+ case EXIT_REASON_HLT:
+ return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
+ case EXIT_REASON_INVD:
+ return 1;
+ case EXIT_REASON_INVLPG:
+ return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
+ case EXIT_REASON_RDPMC:
+ return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
+ case EXIT_REASON_RDTSC:
+ return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
+ case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
+ case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
+ */
+ return 1;
+ case EXIT_REASON_CR_ACCESS:
+ return nested_vmx_exit_handled_cr(vcpu, vmcs12);
+ case EXIT_REASON_DR_ACCESS:
+ return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
+ case EXIT_REASON_IO_INSTRUCTION:
+ /* TODO: support IO bitmaps */
+ return 1;
+ case EXIT_REASON_MSR_READ:
+ case EXIT_REASON_MSR_WRITE:
+ return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
+ case EXIT_REASON_INVALID_STATE:
+ return 1;
+ case EXIT_REASON_MWAIT_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+ case EXIT_REASON_MONITOR_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
+ case EXIT_REASON_PAUSE_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING);
+ case EXIT_REASON_MCE_DURING_VMENTRY:
+ return 0;
+ case EXIT_REASON_TPR_BELOW_THRESHOLD:
+ return 1;
+ case EXIT_REASON_APIC_ACCESS:
+ return nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ case EXIT_REASON_EPT_VIOLATION:
+ case EXIT_REASON_EPT_MISCONFIG:
+ return 0;
+ case EXIT_REASON_WBINVD:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
+ case EXIT_REASON_XSETBV:
+ return 1;
+ default:
+ return 1;
+ }
+}
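Most cases in nested_vmx_exit_handled() reduce to asking whether L1 enabled the matching execution control in vmcs12. The nested_cpu_has()/nested_cpu_has2() helpers are defined earlier in the file; the self-contained sketch below only assumes they test the primary and secondary control words (the reduced struct and the control-bit values are taken from the standard VMX definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPU_BASED_HLT_EXITING			0x00000080
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS	0x80000000
#define SECONDARY_EXEC_WBINVD_EXITING		0x00000040

/* Only the two control words the helpers consult (names mirror vmcs12). */
struct vmcs12_ctls {
	uint32_t cpu_based_vm_exec_control;
	uint32_t secondary_vm_exec_control;
};

static bool nested_cpu_has(const struct vmcs12_ctls *v, uint32_t bit)
{
	return v->cpu_based_vm_exec_control & bit;
}

static bool nested_cpu_has2(const struct vmcs12_ctls *v, uint32_t bit)
{
	/* Secondary controls only count if L1 activated them at all. */
	return (v->cpu_based_vm_exec_control &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
	       (v->secondary_vm_exec_control & bit);
}

int main(void)
{
	struct vmcs12_ctls v = {
		.cpu_based_vm_exec_control = CPU_BASED_HLT_EXITING,
		.secondary_vm_exec_control = SECONDARY_EXEC_WBINVD_EXITING,
	};

	printf("HLT exit to L1:    %d\n",
	       nested_cpu_has(&v, CPU_BASED_HLT_EXITING));
	printf("WBINVD exit to L1: %d\n",
	       nested_cpu_has2(&v, SECONDARY_EXEC_WBINVD_EXITING));
	return 0;
}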
+
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
*info1 = vmcs_readl(EXIT_QUALIFICATION);
@@ -3933,6 +5702,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
if (vmx->emulation_required && emulate_invalid_guest_state)
return handle_invalid_guest_state(vcpu);
+ /*
+ * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
+ * we did not inject a still-pending event to L1 now because of
+ * nested_run_pending, we need to re-enable this bit.
+ */
+ if (vmx->nested.nested_run_pending)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
+ exit_reason == EXIT_REASON_VMRESUME))
+ vmx->nested.nested_run_pending = 1;
+ else
+ vmx->nested.nested_run_pending = 0;
+
+ if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
+ nested_vmx_vmexit(vcpu);
+ return 1;
+ }
+
if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
@@ -3955,7 +5743,9 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
"(0x%x) and exit reason is 0x%x\n",
__func__, vectoring_info, exit_reason);
- if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
+ if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
+ !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
+ get_vmcs12(vcpu), vcpu)))) {
if (vmx_interrupt_allowed(vcpu)) {
vmx->soft_vnmi_blocked = 0;
} else if (vmx->vnmi_blocked_time > 1000000000LL &&
@@ -4118,6 +5908,8 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
+ if (is_guest_mode(&vmx->vcpu))
+ return;
__vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
VM_EXIT_INSTRUCTION_LEN,
IDT_VECTORING_ERROR_CODE);
@@ -4125,6 +5917,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
{
+ if (is_guest_mode(vcpu))
+ return;
__vmx_complete_interrupts(to_vmx(vcpu),
vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
VM_ENTRY_INSTRUCTION_LEN,
@@ -4145,6 +5939,21 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ if (vmcs12->idt_vectoring_info_field &
+ VECTORING_INFO_VALID_MASK) {
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ vmcs12->idt_vectoring_info_field);
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmcs12->vm_exit_instruction_len);
+ if (vmcs12->idt_vectoring_info_field &
+ VECTORING_INFO_DELIVER_CODE_MASK)
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ vmcs12->idt_vectoring_error_code);
+ }
+ }
+
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
vmx->entry_time = ktime_get();
@@ -4167,6 +5976,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
+ vmx->__launched = vmx->loaded_vmcs->launched;
asm(
/* Store host registers */
"push %%"R"dx; push %%"R"bp;"
@@ -4237,7 +6047,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"pop %%"R"bp; pop %%"R"dx \n\t"
"setbe %c[fail](%0) \n\t"
: : "c"(vmx), "d"((unsigned long)HOST_RSP),
- [launched]"i"(offsetof(struct vcpu_vmx, launched)),
+ [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
[fail]"i"(offsetof(struct vcpu_vmx, fail)),
[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
@@ -4276,8 +6086,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+ if (is_guest_mode(vcpu)) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ vmcs12->idt_vectoring_info_field = vmx->idt_vectoring_info;
+ if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
+ vmcs12->idt_vectoring_error_code =
+ vmcs_read32(IDT_VECTORING_ERROR_CODE);
+ vmcs12->vm_exit_instruction_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ }
+ }
+
asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
- vmx->launched = 1;
+ vmx->loaded_vmcs->launched = 1;
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
@@ -4289,41 +6110,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#undef R
#undef Q
-static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (vmx->vmcs) {
- vcpu_clear(vmx);
- free_vmcs(vmx->vmcs);
- vmx->vmcs = NULL;
- }
-}
-
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
free_vpid(vmx);
- vmx_free_vmcs(vcpu);
+ free_nested(vmx);
+ free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
}
-static inline void vmcs_init(struct vmcs *vmcs)
-{
- u64 phys_addr = __pa(per_cpu(vmxarea, raw_smp_processor_id()));
-
- if (!vmm_exclusive)
- kvm_cpu_vmxon(phys_addr);
-
- vmcs_clear(vmcs);
-
- if (!vmm_exclusive)
- kvm_cpu_vmxoff();
-}
-
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
@@ -4345,11 +6143,15 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto uninit_vcpu;
}
- vmx->vmcs = alloc_vmcs();
- if (!vmx->vmcs)
+ vmx->loaded_vmcs = &vmx->vmcs01;
+ vmx->loaded_vmcs->vmcs = alloc_vmcs();
+ if (!vmx->loaded_vmcs->vmcs)
goto free_msrs;
-
- vmcs_init(vmx->vmcs);
+ if (!vmm_exclusive)
+ kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
+ loaded_vmcs_init(vmx->loaded_vmcs);
+ if (!vmm_exclusive)
+ kvm_cpu_vmxoff();
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
@@ -4375,10 +6177,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
+ vmx->nested.current_vmptr = -1ull;
+ vmx->nested.current_vmcs12 = NULL;
+
return &vmx->vcpu;
free_vmcs:
- free_vmcs(vmx->vmcs);
+ free_vmcs(vmx->loaded_vmcs->vmcs);
free_msrs:
kfree(vmx->guest_msrs);
uninit_vcpu:
@@ -4512,6 +6317,650 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
+ if (func == 1 && nested)
+ entry->ecx |= bit(X86_FEATURE_VMX);
+}
+
+/*
+ * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+ * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
+ * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
+ * guest in a way that is appropriate both to L1's requests and to our own
+ * needs. In addition to modifying the active vmcs (which is vmcs02), this
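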
+ * function also has additional necessary side-effects, like setting various
+ * vcpu->arch fields.
+ */
+static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exec_control;
+
+ vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
+ vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
+ vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
+ vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
+ vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
+ vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
+ vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
+ vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
+ vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
+ vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
+ vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
+ vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
+ vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
+ vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
+ vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
+ vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
+ vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
+ vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
+ vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
+ vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
+ vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
+ vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
+ vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
+ vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
+ vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
+ vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
+ vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
+ vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
+ vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
+ vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
+ vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
+ vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
+ vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
+
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ vmcs12->vm_entry_intr_info_field);
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ vmcs12->vm_entry_exception_error_code);
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmcs12->vm_entry_instruction_len);
+ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
+ vmcs12->guest_interruptibility_info);
+ vmcs_write32(GUEST_ACTIVITY_STATE, vmcs12->guest_activity_state);
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
+ vmcs_writel(GUEST_DR7, vmcs12->guest_dr7);
+ vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags);
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
+ vmcs12->guest_pending_dbg_exceptions);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
+
+ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
+ (vmcs_config.pin_based_exec_ctrl |
+ vmcs12->pin_based_vm_exec_control));
+
+ /*
+ * Whether page-faults are trapped is determined by a combination of
+ * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
+ * If enable_ept, L0 doesn't care about page faults and we should
+ * set all of these to L1's desires. However, if !enable_ept, L0 does
+ * care about (at least some) page faults, and because it is not easy
+ * (if at all possible?) to merge L0 and L1's desires, we simply ask
+ * to exit on each and every L2 page fault. This is done by setting
+ * MASK=MATCH=0 and (see below) EB.PF=1.
+ * Note that below we don't need special code to set EB.PF beyond the
+ * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
+ * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
+ * !enable_ept, EB.PF is 1, so the "or" will always be 1.
+ *
+ * A problem with this approach (when !enable_ept) is that L1 may be
+ * injected with more page faults than it asked for. This could have
+ * caused problems, but in practice existing hypervisors don't care.
+ * To fix this, we will need to emulate the PFEC checking (on the L1
+ * page tables), using walk_addr(), when injecting PFs to L1.
+ */
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
+ enable_ept ? vmcs12->page_fault_error_code_mask : 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
+ enable_ept ? vmcs12->page_fault_error_code_match : 0);
+
+ if (cpu_has_secondary_exec_ctrls()) {
+ u32 exec_control = vmx_secondary_exec_control(vmx);
+ if (!vmx->rdtscp_enabled)
+ exec_control &= ~SECONDARY_EXEC_RDTSCP;
+ /* Take the following fields only from vmcs12 */
+ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ if (nested_cpu_has(vmcs12,
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
+ exec_control |= vmcs12->secondary_vm_exec_control;
+
+ if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
+ /*
+ * Translate L1 physical address to host physical
+ * address for vmcs02. Keep the page pinned, so this
+ * physical address remains valid. We keep a reference
+ * to it so we can release it later.
+ */
+ if (vmx->nested.apic_access_page) /* shouldn't happen */
+ nested_release_page(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page =
+ nested_get_page(vcpu, vmcs12->apic_access_addr);
+ /*
+ * If translation failed, no matter: This feature asks
+ * to exit when accessing the given address, and if it
+ * can never be accessed, this feature won't do
+ * anything anyway.
+ */
+ if (!vmx->nested.apic_access_page)
+ exec_control &=
+ ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ else
+ vmcs_write64(APIC_ACCESS_ADDR,
+ page_to_phys(vmx->nested.apic_access_page));
+ }
+
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+ }
+
+
+ /*
+ * Set host-state according to L0's settings (vmcs12 is irrelevant here)
+ * Some constant fields are set here by vmx_set_constant_host_state().
+ * Other fields are different per CPU, and will be set later when
+ * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
+ */
+ vmx_set_constant_host_state();
+
+ /*
+ * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
+ * entry, but only if the current (host) sp changed from the value
+ * we wrote last (vmx->host_rsp). This cache is no longer relevant
+ * if we switch vmcs, and rather than hold a separate cache per vmcs,
+ * here we just force the write to happen on entry.
+ */
+ vmx->host_rsp = 0;
+
+ exec_control = vmx_exec_control(vmx); /* L0's desires */
+ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+ exec_control |= vmcs12->cpu_based_vm_exec_control;
+ /*
+ * Merging of IO and MSR bitmaps not currently supported.
+ * Rather, exit every time.
+ */
+ exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
+ exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
+ exec_control |= CPU_BASED_UNCOND_IO_EXITING;
+
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+
+ /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
+ * bitwise-or of what L1 wants to trap for L2, and what we want to
+ * trap. Note that CR0.TS also needs updating - we do this later.
+ */
+ update_exception_bitmap(vcpu);
+ vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+
+ /* Note: IA32E_MODE, LOAD_IA32_EFER are modified by vmx_set_efer below */
+ vmcs_write32(VM_EXIT_CONTROLS,
+ vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl);
+ vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls |
+ (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
+
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
+ else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+
+
+ set_cr4_guest_host_mask(vmx);
+
+ vmcs_write64(TSC_OFFSET,
+ vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+
+ if (enable_vpid) {
+ /*
+ * Trivially support vpid by letting L2s share their parent
+ * L1's vpid. TODO: move to a more elaborate solution, giving
+ * each L2 its own vpid and exposing the vpid feature to L1.
+ */
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+ vmx_flush_tlb(vcpu);
+ }
+
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
+ vcpu->arch.efer = vmcs12->guest_ia32_efer;
+ if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+ vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+ else
+ vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+ /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ /*
+ * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
+ * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
+ * The CR0_READ_SHADOW is what L2 should have expected to read given
+ * the specification by L1; it's not enough to take
+ * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
+ * have more bits set than L1 expected.
+ */
+ vmx_set_cr0(vcpu, vmcs12->guest_cr0);
+ vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
+
+ vmx_set_cr4(vcpu, vmcs12->guest_cr4);
+ vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
+
+ /* shadow page tables on either EPT or shadow page tables */
+ kvm_set_cr3(vcpu, vmcs12->guest_cr3);
+ kvm_mmu_reset_context(vcpu);
+
+ kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+}
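The page-fault discussion inside prepare_vmcs02() argues that OR-ing vmcs01's and vmcs12's exception bitmaps already produces the right EB.PF in both EPT modes. A tiny sketch of that truth table, under the stated assumption that vmcs01 sets EB.PF exactly when !enable_ept:

#include <stdbool.h>
#include <stdio.h>

/* EB.PF in the merged vmcs02 is just the OR of the two bitmaps' PF bits. */
static bool vmcs02_eb_pf(bool enable_ept, bool l1_wants_pf)
{
	bool vmcs01_pf = !enable_ept;	/* L0 traps #PF only without EPT */
	bool vmcs12_pf = l1_wants_pf;	/* whatever L1 put in its bitmap  */

	return vmcs01_pf || vmcs12_pf;
}

int main(void)
{
	for (int ept = 0; ept <= 1; ept++)
		for (int l1 = 0; l1 <= 1; l1++)
			printf("enable_ept=%d l1_wants_pf=%d -> vmcs02 EB.PF=%d\n",
			       ept, l1, vmcs02_eb_pf(ept, l1));
	return 0;
}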
+
+/*
+ * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
+ * for running an L2 nested guest.
+ */
+static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
+{
+ struct vmcs12 *vmcs12;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int cpu;
+ struct loaded_vmcs *vmcs02;
+
+ if (!nested_vmx_check_permission(vcpu) ||
+ !nested_vmx_check_vmcs12(vcpu))
+ return 1;
+
+ skip_emulated_instruction(vcpu);
+ vmcs12 = get_vmcs12(vcpu);
+
+ /*
+ * The nested entry process starts with enforcing various prerequisites
+ * on vmcs12 as required by the Intel SDM, and acting appropriately when
+ * they fail: As the SDM explains, some conditions should cause the
+ * instruction to fail, while others will cause the instruction to seem
+ * to succeed, but return an EXIT_REASON_INVALID_STATE.
+ * To speed up the normal (success) code path, we should avoid checking
+ * for misconfigurations which will anyway be caught by the processor
+ * when using the merged vmcs02.
+ */
+ if (vmcs12->launch_state == launch) {
+ nested_vmx_failValid(vcpu,
+ launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
+ : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
+ return 1;
+ }
+
+ if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
+ !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
+ /*TODO: Also verify bits beyond physical address width are 0*/
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
+ !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
+ /*TODO: Also verify bits beyond physical address width are 0*/
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (vmcs12->vm_entry_msr_load_count > 0 ||
+ vmcs12->vm_exit_msr_load_count > 0 ||
+ vmcs12->vm_exit_msr_store_count > 0) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__);
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
+ nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
+ !vmx_control_verify(vmcs12->secondary_vm_exec_control,
+ nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
+ !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
+ nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
+ !vmx_control_verify(vmcs12->vm_exit_controls,
+ nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
+ !vmx_control_verify(vmcs12->vm_entry_controls,
+ nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
+ {
+ nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ return 1;
+ }
+
+ if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
+ ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
+ return 1;
+ }
+
+ if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
+ ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
+ return 1;
+ }
+ if (vmcs12->vmcs_link_pointer != -1ull) {
+ nested_vmx_entry_failure(vcpu, vmcs12,
+ EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
+ return 1;
+ }
+
+ /*
+ * We're finally done with prerequisite checking, and can start with
+ * the nested entry.
+ */
+
+ vmcs02 = nested_get_current_vmcs02(vmx);
+ if (!vmcs02)
+ return -ENOMEM;
+
+ enter_guest_mode(vcpu);
+
+ vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
+
+ cpu = get_cpu();
+ vmx->loaded_vmcs = vmcs02;
+ vmx_vcpu_put(vcpu);
+ vmx_vcpu_load(vcpu, cpu);
+ vcpu->cpu = cpu;
+ put_cpu();
+
+ vmcs12->launch_state = 1;
+
+ prepare_vmcs02(vcpu, vmcs12);
+
+ /*
+ * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
+ * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
+ * returned as far as L1 is concerned. It will only return (and set
+ * the success flag) when L2 exits (see nested_vmx_vmexit()).
+ */
+ return 1;
+}
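The vmx_control_verify() calls near the top of nested_vmx_run() check L1's chosen controls against the allowed-0/allowed-1 pairs KVM advertised in the VMX capability MSRs: every must-be-one bit has to be set, and no bit outside the may-be-one mask may be set. vmx_control_verify() itself is defined earlier in the file; the standalone sketch below only assumes those semantics:

#include <stdbool.h>
#include <stdint.h>

/*
 * 'low' holds the bits that must be 1 (allowed-0 settings), 'high' the
 * bits that may be 1 (allowed-1 settings), as reported by the
 * IA32_VMX_*_CTLS capability MSRs.
 */
static bool control_verify(uint32_t control, uint32_t low, uint32_t high)
{
	return ((control & high) == control) &&	/* no disallowed bit set  */
	       ((control & low) == low);	/* every required bit set */
}

nested_vmx_run() applies this check to the pin-based, primary, secondary, exit and entry control words in turn before attempting the nested entry.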
+
+/*
+ * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
+ * because L2 may have changed some cr0 bits directly (as permitted by CR0_GUEST_HOST_MASK).
+ * This function returns the new value we should put in vmcs12.guest_cr0.
+ * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
+ * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
+ * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
+ * didn't trap the bit, because if L1 did, so would L0).
+ * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
+ * been modified by L2, and L1 knows it. So just leave the old value of
+ * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
+ * isn't relevant, because if L0 traps this bit it can set it to anything.
+ * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
+ * changed these bits, and therefore they need to be updated, but L0
+ * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
+ * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
+ */
+static inline unsigned long
+vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ return
+ /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
+ /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
+ /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
+ vcpu->arch.cr0_guest_owned_bits));
+}
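The three cases above combine into disjoint bit classes (prepare_vmcs02() cleared vmcs12's mask bits out of cr0_guest_owned_bits, so no bit falls into two classes). A worked example with illustrative masks and values:

#include <stdio.h>

/* Recompute vmcs12.guest_cr0 from the three disjoint bit classes. */
static unsigned long merge_guest_cr0(unsigned long guest_owned,   /* neither L0 nor L1 trap */
				     unsigned long l1_mask,        /* cr0_guest_host_mask in vmcs12 */
				     unsigned long vmcs02_cr0,     /* GUEST_CR0 read from vmcs02 */
				     unsigned long vmcs12_cr0,     /* last value L1 wrote */
				     unsigned long vmcs02_shadow)  /* CR0_READ_SHADOW in vmcs02 */
{
	return (vmcs02_cr0 & guest_owned) |		   /* case 1 */
	       (vmcs12_cr0 & l1_mask) |			   /* case 2 */
	       (vmcs02_shadow & ~(l1_mask | guest_owned)); /* case 3 */
}

int main(void)
{
	/* Illustrative masks: guest owns TS (0x8), L1 traps PG (0x80000000). */
	unsigned long owned = 0x8, l1_mask = 0x80000000;
	unsigned long v = merge_guest_cr0(owned, l1_mask,
					  /* vmcs02 GUEST_CR0   */ 0x80000009,
					  /* vmcs12 guest_cr0   */ 0x80000031,
					  /* vmcs02 READ_SHADOW */ 0x00000031);

	printf("vmcs12.guest_cr0 = %#lx\n", v);	/* prints 0x80000039 */
	return 0;
}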
+
+static inline unsigned long
+vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ return
+ /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
+ /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
+ /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
+ vcpu->arch.cr4_guest_owned_bits));
+}
+
+/*
+ * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
+ * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
+ * and this function updates it to reflect the changes to the guest state while
+ * L2 was running (and perhaps made some exits which were handled directly by L0
+ * without going back to L1), and to reflect the exit reason.
+ * Note that we do not have to copy here all VMCS fields, just those that
+ * could have changed by the L2 guest or the exit - i.e., the guest-state and
+ * exit-information fields only. Other fields are modified by L1 with VMWRITE,
+ * which already writes to vmcs12 directly.
+ */
+void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ /* update guest state fields: */
+ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
+ vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
+
+ kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
+ vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
+ vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
+
+ vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
+ vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
+ vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
+ vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
+ vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
+ vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
+ vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
+ vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
+ vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
+ vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
+ vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
+ vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
+ vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
+ vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
+ vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
+ vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
+ vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
+ vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
+ vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
+ vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
+ vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
+ vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
+ vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
+ vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
+ vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
+ vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
+ vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
+ vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
+ vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
+ vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
+ vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
+ vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
+ vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
+ vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
+ vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
+ vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
+
+ vmcs12->guest_activity_state = vmcs_read32(GUEST_ACTIVITY_STATE);
+ vmcs12->guest_interruptibility_info =
+ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+ vmcs12->guest_pending_dbg_exceptions =
+ vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+
+ /* TODO: These cannot have changed unless we have MSR bitmaps and
+ * the relevant bit asks not to trap the change */
+ vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
+ vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
+ vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
+ vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
+ vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
+
+ /* update exit information fields: */
+
+ vmcs12->vm_exit_reason = vmcs_read32(VM_EXIT_REASON);
+ vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+ vmcs12->idt_vectoring_info_field =
+ vmcs_read32(IDT_VECTORING_INFO_FIELD);
+ vmcs12->idt_vectoring_error_code =
+ vmcs_read32(IDT_VECTORING_ERROR_CODE);
+ vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+
+ /* clear vm-entry fields which are to be cleared on exit */
+ if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
+}
+
+/*
+ * A part of what we need to do when the nested L2 guest exits and we want to
+ * run its L1 parent is to reset L1's guest state to the host state specified
+ * in vmcs12.
+ * This function is to be called not only on normal nested exit, but also on
+ * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
+ * Failures During or After Loading Guest State").
+ * This function should be called when the active VMCS is L1's (vmcs01).
+ */
+void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
+ vcpu->arch.efer = vmcs12->host_ia32_efer;
+ if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
+ vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+ else
+ vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
+ /*
+ * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
+ * actually changed, because it depends on the current state of
+ * fpu_active (which may have changed).
+ * Note that vmx_set_cr0 refers to efer set above.
+ */
+ kvm_set_cr0(vcpu, vmcs12->host_cr0);
+ /*
+ * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
+ * to apply the same changes to L1's vmcs. We just set cr0 correctly,
+ * but we also need to update cr0_guest_host_mask and exception_bitmap.
+ */
+ update_exception_bitmap(vcpu);
+ vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+
+ /*
+ * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
+ * (KVM doesn't change it) - no reason to call set_cr4_guest_host_mask();
+ */
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ kvm_set_cr4(vcpu, vmcs12->host_cr4);
+
+ /* shadow page tables on either EPT or shadow page tables */
+ kvm_set_cr3(vcpu, vmcs12->host_cr3);
+ kvm_mmu_reset_context(vcpu);
+
+ if (enable_vpid) {
+ /*
+ * Trivially support vpid by letting L2s share their parent
+ * L1's vpid. TODO: move to a more elaborate solution, giving
+ * each L2 its own vpid and exposing the vpid feature to L1.
+ */
+ vmx_flush_tlb(vcpu);
+ }
+
+
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+ vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base);
+ vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base);
+ vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base);
+ vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector);
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector);
+ vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector);
+ vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector);
+ vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector);
+ vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
+ vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);
+
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+ vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
+ vmcs12->host_ia32_perf_global_ctrl);
+}
+
+/*
+ * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
+ * and modify vmcs12 to make it see what it would expect to see there if
+ * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
+ */
+static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int cpu;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ leave_guest_mode(vcpu);
+ prepare_vmcs12(vcpu, vmcs12);
+
+ cpu = get_cpu();
+ vmx->loaded_vmcs = &vmx->vmcs01;
+ vmx_vcpu_put(vcpu);
+ vmx_vcpu_load(vcpu, cpu);
+ vcpu->cpu = cpu;
+ put_cpu();
+
+ /* if no vmcs02 cache requested, remove the one we used */
+ if (VMCS02_POOL_SIZE == 0)
+ nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
+
+ load_vmcs12_host_state(vcpu, vmcs12);
+
+ /* Update TSC_OFFSET if vmx_adjust_tsc_offset() was used while L2 ran */
+ vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+
+ /* This is needed for the same reason as it was in prepare_vmcs02 */
+ vmx->host_rsp = 0;
+
+ /* Unpin physical memory we referred to in vmcs02 */
+ if (vmx->nested.apic_access_page) {
+ nested_release_page(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page = NULL;
+ }
+
+ /*
+ * Exiting from L2 to L1, we're now back to L1 which thinks it just
+ * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
+ * success or failure flag accordingly.
+ */
+ if (unlikely(vmx->fail)) {
+ vmx->fail = 0;
+ nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
+ } else
+ nested_vmx_succeed(vcpu);
+}
+
+/*
+ * L1's failure to enter L2 is a subset of a normal exit, as explained in
+ * 23.7 "VM-entry failures during or after loading guest state" (this also
+ * lists the acceptable exit-reason and exit-qualification parameters).
+ * It should only be called before L2 has actually started to run, and when
+ * vmcs01 is current (it doesn't leave_guest_mode() or switch VMCSs).
+ */
+static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12,
+ u32 reason, unsigned long qualification)
+{
+ load_vmcs12_host_state(vcpu, vmcs12);
+ vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+ vmcs12->exit_qualification = qualification;
+ nested_vmx_succeed(vcpu);
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
@@ -4670,16 +7119,13 @@ static int __init vmx_init(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
if (enable_ept) {
- bypass_guest_pf = 0;
kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
VMX_EPT_EXECUTABLE_MASK);
+ ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
kvm_disable_tdp();
- if (bypass_guest_pf)
- kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
-
return 0;
out3:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 77c9d8673dc4..84a28ea45fa4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -347,6 +347,7 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
vcpu->arch.cr2 = fault->address;
kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
+EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
@@ -579,6 +580,22 @@ static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}
+static bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_SMEP));
+}
+
+static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
+}
+
static void update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -598,14 +615,20 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
-
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
+ X86_CR4_PAE | X86_CR4_SMEP;
if (cr4 & CR4_RESERVED_BITS)
return 1;
if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
return 1;
+ if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
+ return 1;
+
+ if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
+ return 1;
+
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
@@ -615,11 +638,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
kvm_read_cr3(vcpu)))
return 1;
- if (cr4 & X86_CR4_VMXE)
+ if (kvm_x86_ops->set_cr4(vcpu, cr4))
return 1;
- kvm_x86_ops->set_cr4(vcpu, cr4);
-
if ((cr4 ^ old_cr4) & pdptr_bits)
kvm_mmu_reset_context(vcpu);
@@ -787,12 +808,12 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
* kvm-specific. Those are put in the beginning of the list.
*/
-#define KVM_SAVE_MSRS_BEGIN 8
+#define KVM_SAVE_MSRS_BEGIN 9
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
- HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
+ HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_STAR,
#ifdef CONFIG_X86_64
@@ -1388,7 +1409,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return 1;
kvm_x86_ops->patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
- if (copy_to_user((void __user *)addr, instructions, 4))
+ if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
break;
@@ -1415,7 +1436,7 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
if (kvm_is_error_hva(addr))
return 1;
- if (clear_user((void __user *)addr, PAGE_SIZE))
+ if (__clear_user((void __user *)addr, PAGE_SIZE))
return 1;
vcpu->arch.hv_vapic = data;
break;
@@ -1467,6 +1488,35 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
}
}
+static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+{
+ u64 delta;
+
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+ delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+ vcpu->arch.st.accum_steal = delta;
+}
+
+static void record_steal_time(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+ if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+ return;
+
+ vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
+ vcpu->arch.st.steal.version += 2;
+ vcpu->arch.st.accum_steal = 0;
+
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+}
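record_steal_time() publishes the accumulated value and bumps 'version' by 2 on every update, which lets a reader detect a concurrent update by comparing the version before and after reading 'steal'. The guest-side consumer is not part of this patch; the following is only a sketch of what such a reader might look like, assuming the guest maps the same structure (the field layout shown is illustrative, the real one is fixed by the steal-time ABI):

#include <stdint.h>

/* Illustrative mirror of the shared record. */
struct steal_time_shared {
	volatile uint64_t steal;	/* ns of involuntary wait */
	volatile uint32_t version;	/* bumped by 2 on every host update */
};

/* Retry until 'steal' is read under a single, stable version. */
static uint64_t read_steal(const struct steal_time_shared *st)
{
	uint32_t v;
	uint64_t steal;

	do {
		v = st->version;
		__sync_synchronize();	/* order version read vs. payload */
		steal = st->steal;
		__sync_synchronize();
	} while (v != st->version);

	return steal;
}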
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
@@ -1549,6 +1599,33 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
+ case MSR_KVM_STEAL_TIME:
+
+ if (unlikely(!sched_info_on()))
+ return 1;
+
+ if (data & KVM_STEAL_RESERVED_MASK)
+ return 1;
+
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+ data & KVM_STEAL_VALID_BITS))
+ return 1;
+
+ vcpu->arch.st.msr_val = data;
+
+ if (!(data & KVM_MSR_ENABLED))
+ break;
+
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+
+ preempt_disable();
+ accumulate_steal_time(vcpu);
+ preempt_enable();
+
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+
+ break;
+
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1834,6 +1911,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_KVM_ASYNC_PF_EN:
data = vcpu->arch.apf.msr_val;
break;
+ case MSR_KVM_STEAL_TIME:
+ data = vcpu->arch.st.msr_val;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
@@ -2145,6 +2225,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_migrate_timers(vcpu);
vcpu->cpu = cpu;
}
+
+ accumulate_steal_time(vcpu);
+ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -2283,6 +2366,13 @@ static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->flags = 0;
}
+static bool supported_xcr0_bit(unsigned bit)
+{
+ u64 mask = ((u64)1 << bit);
+
+ return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
+}
+
#define F(x) bit(X86_FEATURE_##x)
static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
@@ -2328,7 +2418,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
0 /* Reserved, DCA */ | F(XMM4_1) |
F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
- F(F16C);
+ F(F16C) | F(RDRAND);
/* cpuid 0x80000001.ecx */
const u32 kvm_supported_word6_x86_features =
F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
@@ -2342,6 +2432,10 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
F(PMM) | F(PMM_EN);
+ /* cpuid 7.0.ebx */
+ const u32 kvm_supported_word9_x86_features =
+ F(SMEP) | F(FSGSBASE) | F(ERMS);
+
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
do_cpuid_1_ent(entry, function, index);
@@ -2376,7 +2470,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
- /* function 4 and 0xb have additional index. */
+ /* function 4 has additional index. */
case 4: {
int i, cache_type;
@@ -2393,6 +2487,22 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
+ case 7: {
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ /* Mask ebx against host capability word 9 */
+ if (index == 0) {
+ entry->ebx &= kvm_supported_word9_x86_features;
+ cpuid_mask(&entry->ebx, 9);
+ } else
+ entry->ebx = 0;
+ entry->eax = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
+ break;
+ }
+ case 9:
+ break;
+ /* function 0xb has additional index. */
case 0xb: {
int i, level_type;
@@ -2410,16 +2520,17 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
break;
}
case 0xd: {
- int i;
+ int idx, i;
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
- for (i = 1; *nent < maxnent && i < 64; ++i) {
- if (entry[i].eax == 0)
+ for (idx = 1, i = 1; *nent < maxnent && idx < 64; ++idx) {
+ do_cpuid_1_ent(&entry[i], function, idx);
+ if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
continue;
- do_cpuid_1_ent(&entry[i], function, i);
entry[i].flags |=
KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
++*nent;
+ ++i;
}
break;
}
@@ -2438,6 +2549,10 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
(1 << KVM_FEATURE_CLOCKSOURCE2) |
(1 << KVM_FEATURE_ASYNC_PF) |
(1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+
+ if (sched_info_on())
+ entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
+
entry->ebx = 0;
entry->ecx = 0;
entry->edx = 0;
@@ -2451,6 +2566,24 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_supported_word6_x86_features;
cpuid_mask(&entry->ecx, 6);
break;
+ case 0x80000008: {
+ unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+ unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+ unsigned phys_as = entry->eax & 0xff;
+
+ if (!g_phys_as)
+ g_phys_as = phys_as;
+ entry->eax = g_phys_as | (virt_as << 8);
+ entry->ebx = entry->edx = 0;
+ break;
+ }
+ case 0x80000019:
+ entry->ecx = entry->edx = 0;
+ break;
+ case 0x8000001a:
+ break;
+ case 0x8000001d:
+ break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
/*Just support up to 0xC0000004 now*/
@@ -2460,10 +2593,16 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->edx &= kvm_supported_word5_x86_features;
cpuid_mask(&entry->edx, 5);
break;
+ case 3: /* Processor serial number */
+ case 5: /* MONITOR/MWAIT */
+ case 6: /* Thermal management */
+ case 0xA: /* Architectural Performance Monitoring */
+ case 0x80000007: /* Advanced power management */
case 0xC0000002:
case 0xC0000003:
case 0xC0000004:
- /*Now nothing to do, reserved for the future*/
+ default:
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
}
@@ -3817,7 +3956,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
exception);
}
-static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
@@ -3827,6 +3966,7 @@ static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
+EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
@@ -3836,7 +3976,7 @@ static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
}
-static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val,
unsigned int bytes,
struct x86_exception *exception)
@@ -3868,6 +4008,42 @@ static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
out:
return r;
}
+EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+
+static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
+ gpa_t *gpa, struct x86_exception *exception,
+ bool write)
+{
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+
+ if (vcpu_match_mmio_gva(vcpu, gva) &&
+ check_write_user_access(vcpu, write, access,
+ vcpu->arch.access)) {
+ *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
+ (gva & (PAGE_SIZE - 1));
+ trace_vcpu_match_mmio(gva, *gpa, write, false);
+ return 1;
+ }
+
+ if (write)
+ access |= PFERR_WRITE_MASK;
+
+ *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+
+ if (*gpa == UNMAPPED_GVA)
+ return -1;
+
+ /* For APIC access vmexit */
+ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ return 1;
+
+ if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
+ trace_vcpu_match_mmio(gva, *gpa, write, true);
+ return 1;
+ }
+
+ return 0;
+}
static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
@@ -3876,8 +4052,8 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- gpa_t gpa;
- int handled;
+ gpa_t gpa;
+ int handled, ret;
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
@@ -3887,13 +4063,12 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
return X86EMUL_CONTINUE;
}
- gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
+ ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, false);
- if (gpa == UNMAPPED_GVA)
+ if (ret < 0)
return X86EMUL_PROPAGATE_FAULT;
- /* For APIC access vmexit */
- if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ if (ret)
goto mmio;
if (kvm_read_guest_virt(ctxt, addr, val, bytes, exception)
@@ -3944,16 +4119,16 @@ static int emulator_write_emulated_onepage(unsigned long addr,
struct x86_exception *exception,
struct kvm_vcpu *vcpu)
{
- gpa_t gpa;
- int handled;
+ gpa_t gpa;
+ int handled, ret;
- gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
+ ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, true);
- if (gpa == UNMAPPED_GVA)
+ if (ret < 0)
return X86EMUL_PROPAGATE_FAULT;
/* For APIC access vmexit */
- if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ if (ret)
goto mmio;
if (emulator_write_phys(vcpu, gpa, val, bytes))
@@ -4473,9 +4648,24 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
kvm_queue_exception(vcpu, ctxt->exception.vector);
}
+static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
+ const unsigned long *regs)
+{
+ memset(&ctxt->twobyte, 0,
+ (void *)&ctxt->regs - (void *)&ctxt->twobyte);
+ memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
+
+ ctxt->fetch.start = 0;
+ ctxt->fetch.end = 0;
+ ctxt->io_read.pos = 0;
+ ctxt->io_read.end = 0;
+ ctxt->mem_read.pos = 0;
+ ctxt->mem_read.end = 0;
+}
+
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int cs_db, cs_l;
/*
@@ -4488,40 +4678,38 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
- vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
- vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
- vcpu->arch.emulate_ctxt.mode =
- (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
- (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
- ? X86EMUL_MODE_VM86 : cs_l
- ? X86EMUL_MODE_PROT64 : cs_db
- ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
- vcpu->arch.emulate_ctxt.guest_mode = is_guest_mode(vcpu);
- memset(c, 0, sizeof(struct decode_cache));
- memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
+ ctxt->eflags = kvm_get_rflags(vcpu);
+ ctxt->eip = kvm_rip_read(vcpu);
+ ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
+ (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
+ cs_l ? X86EMUL_MODE_PROT64 :
+ cs_db ? X86EMUL_MODE_PROT32 :
+ X86EMUL_MODE_PROT16;
+ ctxt->guest_mode = is_guest_mode(vcpu);
+
+ init_decode_cache(ctxt, vcpu->arch.regs);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
- vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
- vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
- vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip +
- inc_eip;
- ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
+ ctxt->op_bytes = 2;
+ ctxt->ad_bytes = 2;
+ ctxt->_eip = ctxt->eip + inc_eip;
+ ret = emulate_int_real(ctxt, irq);
if (ret != X86EMUL_CONTINUE)
return EMULATE_FAIL;
- vcpu->arch.emulate_ctxt.eip = c->eip;
- memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
- kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
- kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ ctxt->eip = ctxt->_eip;
+ memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
+ kvm_rip_write(vcpu, ctxt->eip);
+ kvm_set_rflags(vcpu, ctxt->eflags);
if (irq == NMI_VECTOR)
vcpu->arch.nmi_pending = false;
@@ -4582,21 +4770,21 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
int insn_len)
{
int r;
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
- vcpu->arch.emulate_ctxt.interruptibility = 0;
- vcpu->arch.emulate_ctxt.have_exception = false;
- vcpu->arch.emulate_ctxt.perm_ok = false;
+ ctxt->interruptibility = 0;
+ ctxt->have_exception = false;
+ ctxt->perm_ok = false;
- vcpu->arch.emulate_ctxt.only_vendor_specific_insn
+ ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
- r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
+ r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
@@ -4612,7 +4800,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
}
if (emulation_type & EMULTYPE_SKIP) {
- kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
+ kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
@@ -4620,11 +4808,11 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
changes registers values during IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
- memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
+ memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
}
restart:
- r = x86_emulate_insn(&vcpu->arch.emulate_ctxt);
+ r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
@@ -4636,7 +4824,7 @@ restart:
return handle_emulation_failure(vcpu);
}
- if (vcpu->arch.emulate_ctxt.have_exception) {
+ if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
@@ -4655,13 +4843,12 @@ restart:
r = EMULATE_DONE;
if (writeback) {
- toggle_interruptibility(vcpu,
- vcpu->arch.emulate_ctxt.interruptibility);
- kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ toggle_interruptibility(vcpu, ctxt->interruptibility);
+ kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
- memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+ memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
+ kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
@@ -4878,6 +5065,30 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+static void kvm_set_mmio_spte_mask(void)
+{
+ u64 mask;
+ int maxphyaddr = boot_cpu_data.x86_phys_bits;
+
+ /*
+ * Set the reserved bits and the present bit of a paging-structure
+ * entry to generate a page fault with the RSVD error-code bit set.
+ */
+ mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
+ mask |= 1ull;
+
+#ifdef CONFIG_X86_64
+ /*
+ * If there is no reserved bit available, clear the present bit to
+ * disable the mmio page fault mechanism.
+ */
+ if (maxphyaddr == 52)
+ mask &= ~1ull;
+#endif
+
+ kvm_mmu_set_mmio_spte_mask(mask);
+}
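To make the mask computation above concrete: with maxphyaddr = 40 it sets physical-address bits 40..62 (all reserved in an SPTE) plus the present bit, so touching an MMIO SPTE faults with the reserved-bit error code. A quick user-space check (the maxphyaddr values are only illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t mmio_spte_mask(int maxphyaddr)
{
	/* reserved physical-address bits [maxphyaddr, 62] ... */
	uint64_t mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;

	/* ... plus the present bit, so the access faults with RSVD set */
	return mask | 1ull;
}

int main(void)
{
	printf("maxphyaddr=40: %#llx\n",
	       (unsigned long long)mmio_spte_mask(40)); /* 0x7fffff0000000001 */
	printf("maxphyaddr=46: %#llx\n",
	       (unsigned long long)mmio_spte_mask(46)); /* 0x7fffc00000000001 */
	return 0;
}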
+
int kvm_arch_init(void *opaque)
{
int r;
@@ -4904,10 +5115,10 @@ int kvm_arch_init(void *opaque)
if (r)
goto out;
+ kvm_set_mmio_spte_mask();
kvm_init_msr_list();
kvm_x86_ops = ops;
- kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0);
@@ -5082,8 +5293,7 @@ int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- return emulator_write_emulated(&vcpu->arch.emulate_ctxt,
- rip, instruction, 3, NULL);
+ return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
@@ -5384,6 +5594,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = 1;
goto out;
}
+ if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+ record_steal_time(vcpu);
+
}
r = kvm_mmu_reload(vcpu);
@@ -5671,8 +5884,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
* that usually, but some bad designed PV devices (vmware
* backdoor interface) need this to work
*/
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
- memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
}
regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
@@ -5801,21 +6014,20 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
bool has_error_code, u32 error_code)
{
- struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
- ret = emulator_task_switch(&vcpu->arch.emulate_ctxt,
- tss_selector, reason, has_error_code,
- error_code);
+ ret = emulator_task_switch(ctxt, tss_selector, reason,
+ has_error_code, error_code);
if (ret)
return EMULATE_FAIL;
- memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
- kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
- kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+ memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
+ kvm_rip_write(vcpu, ctxt->eip);
+ kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return EMULATE_DONE;
}
@@ -6093,12 +6305,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
if (r == 0)
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
- if (r < 0)
- goto free_vcpu;
- return 0;
-free_vcpu:
- kvm_x86_ops->vcpu_free(vcpu);
return r;
}
@@ -6126,6 +6333,7 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
+ vcpu->arch.st.msr_val = 0;
kvmclock_reset(vcpu);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e407ed3df817..d36fe237c665 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -75,10 +75,54 @@ static inline u32 bit(int bitno)
return 1 << (bitno & 31);
}
+static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
+ gva_t gva, gfn_t gfn, unsigned access)
+{
+ vcpu->arch.mmio_gva = gva & PAGE_MASK;
+ vcpu->arch.access = access;
+ vcpu->arch.mmio_gfn = gfn;
+}
+
+/*
+ * Clear the mmio cache info for the given gva;
+ * in particular, if gva is ~0ul, we clear all mmio cache info.
+ */
+static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
+{
+ if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
+ return;
+
+ vcpu->arch.mmio_gva = 0;
+}
+
+static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
+{
+ if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
+ return true;
+
+ return false;
+}
+
+static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+ if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
+ return true;
+
+ return false;
+}
+
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
+int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
#endif
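The vcpu_cache_mmio_info()/vcpu_clear_mmio_info() and vcpu_match_mmio_* helpers above form a one-entry cache keyed by page-aligned gva and by gfn. The userspace sketch below models the same logic with an invented struct mmio_cache (it is not struct kvm_vcpu_arch, and the sample addresses are made up) so the match and clear rules can be exercised in isolation.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Userspace sketch of the one-entry MMIO cache above.  A single cached
 * gva/gfn pair lets the emulator skip another page-table walk when the
 * same MMIO page is hit again. */

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((uint64_t)(1 << PAGE_SHIFT) - 1))

struct mmio_cache {
	uint64_t mmio_gva;	/* page-aligned guest virtual address */
	uint64_t mmio_gfn;	/* guest frame number */
	unsigned access;	/* access bits of the cached translation */
};

static void cache_mmio_info(struct mmio_cache *c, uint64_t gva,
			    uint64_t gfn, unsigned access)
{
	c->mmio_gva = gva & PAGE_MASK;
	c->access = access;
	c->mmio_gfn = gfn;
}

/* gva == ~0 clears unconditionally, mirroring vcpu_clear_mmio_info() */
static void clear_mmio_info(struct mmio_cache *c, uint64_t gva)
{
	if (gva != ~0ull && c->mmio_gva != (gva & PAGE_MASK))
		return;
	c->mmio_gva = 0;
}

static bool match_mmio_gva(const struct mmio_cache *c, uint64_t gva)
{
	return c->mmio_gva && c->mmio_gva == (gva & PAGE_MASK);
}

static bool match_mmio_gpa(const struct mmio_cache *c, uint64_t gpa)
{
	return c->mmio_gfn && c->mmio_gfn == (gpa >> PAGE_SHIFT);
}

int main(void)
{
	struct mmio_cache c = { 0, 0, 0 };

	cache_mmio_info(&c, 0xfee00300, 0xfee00, 3);
	printf("gva hit: %d\n", match_mmio_gva(&c, 0xfee00ab0));
	printf("gpa hit: %d\n", match_mmio_gpa(&c, 0xfee00020ull));
	clear_mmio_info(&c, ~0ull);
	printf("after clear: %d\n", match_mmio_gva(&c, 0xfee00ab0));
	return 0;
}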
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index a6575b949b11..ccf73b2f3e69 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,7 +13,7 @@ CFLAGS_mmu.o := $(nostackp)
obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o platform-pci-unplug.o \
- p2m.o
+ p2m.o trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 53257421082b..974a528458a0 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -341,6 +341,8 @@ static void xen_set_ldt(const void *addr, unsigned entries)
struct mmuext_op *op;
struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+ trace_xen_cpu_set_ldt(addr, entries);
+
op = mcs.args;
op->cmd = MMUEXT_SET_LDT;
op->arg1.linear_addr = (unsigned long)addr;
@@ -496,6 +498,8 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
u64 entry = *(u64 *)ptr;
+ trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);
+
preempt_disable();
xen_mc_flush();
@@ -565,6 +569,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
unsigned long p = (unsigned long)&dt[entrynum];
unsigned long start, end;
+ trace_xen_cpu_write_idt_entry(dt, entrynum, g);
+
preempt_disable();
start = __this_cpu_read(idt_desc.address);
@@ -619,6 +625,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
+ trace_xen_cpu_load_idt(desc);
+
spin_lock(&lock);
__get_cpu_var(idt_desc) = *desc;
@@ -637,6 +645,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
const void *desc, int type)
{
+ trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
+
preempt_disable();
switch (type) {
@@ -665,6 +675,8 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
const void *desc, int type)
{
+ trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
+
switch (type) {
case DESC_LDT:
case DESC_TSS:
@@ -684,7 +696,9 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
static void xen_load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
{
- struct multicall_space mcs = xen_mc_entry(0);
+ struct multicall_space mcs;
+
+ mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
xen_mc_issue(PARAVIRT_LAZY_CPU);
}
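Each hunk above simply emits a trace_xen_cpu_* event before queueing the corresponding descriptor-table multicall. The events themselves live in include/trace/events/xen.h, added elsewhere in this series; the snippet below is only a sketch of the shape such a declaration takes with TRACE_EVENT(), not the actual header contents, and the recorded fields are a guess matching the trace_xen_cpu_set_ldt(addr, entries) call site.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xen

#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_XEN_H

#include <linux/tracepoint.h>

/* Sketch only: a minimal event for the trace_xen_cpu_set_ldt() call
 * above; the real definition may record different fields. */
TRACE_EVENT(xen_cpu_set_ldt,
	TP_PROTO(const void *addr, unsigned entries),
	TP_ARGS(addr, entries),
	TP_STRUCT__entry(
		__field(unsigned long, addr)
		__field(unsigned, entries)
	),
	TP_fast_assign(
		__entry->addr = (unsigned long)addr;
		__entry->entries = entries;
	),
	TP_printk("addr %lx entries %u", __entry->addr, __entry->entries)
);

#endif /* _TRACE_XEN_H */

/* This part must be outside the include guard */
#include <trace/define_trace.h>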
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 0ccccb67a993..f987bde77c49 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -48,6 +48,8 @@
#include <linux/memblock.h>
#include <linux/seq_file.h>
+#include <trace/events/xen.h>
+
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
@@ -194,6 +196,8 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
struct multicall_space mcs;
struct mmu_update *u;
+ trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
+
mcs = xen_mc_entry(sizeof(*u));
u = mcs.args;
@@ -225,6 +229,24 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
*u = *update;
}
+static void xen_extend_mmuext_op(const struct mmuext_op *op)
+{
+ struct multicall_space mcs;
+ struct mmuext_op *u;
+
+ mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
+
+ if (mcs.mc != NULL) {
+ mcs.mc->args[1]++;
+ } else {
+ mcs = __xen_mc_entry(sizeof(*u));
+ MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+ }
+
+ u = mcs.args;
+ *u = *op;
+}
+
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
struct mmu_update u;
@@ -245,6 +267,8 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
+ trace_xen_mmu_set_pmd(ptr, val);
+
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
@@ -282,22 +306,30 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
return true;
}
-static void xen_set_pte(pte_t *ptep, pte_t pteval)
+static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
if (!xen_batched_set_pte(ptep, pteval))
native_set_pte(ptep, pteval);
}
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+ trace_xen_mmu_set_pte(ptep, pteval);
+ __xen_set_pte(ptep, pteval);
+}
+
static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- xen_set_pte(ptep, pteval);
+ trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
+ __xen_set_pte(ptep, pteval);
}
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
/* Just return the pte as-is. We preserve the bits on commit */
+ trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
return *ptep;
}
@@ -306,6 +338,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
{
struct mmu_update u;
+ trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
xen_mc_batch();
u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
@@ -530,6 +563,8 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
static void xen_set_pud(pud_t *ptr, pud_t val)
{
+ trace_xen_mmu_set_pud(ptr, val);
+
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
@@ -543,17 +578,20 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
+ trace_xen_mmu_set_pte_atomic(ptep, pte);
set_64bit((u64 *)ptep, native_pte_val(pte));
}
static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
+ trace_xen_mmu_pte_clear(mm, addr, ptep);
if (!xen_batched_set_pte(ptep, native_make_pte(0)))
native_pte_clear(mm, addr, ptep);
}
static void xen_pmd_clear(pmd_t *pmdp)
{
+ trace_xen_mmu_pmd_clear(pmdp);
set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */
@@ -629,6 +667,8 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
pgd_t *user_ptr = xen_get_user_pgd(ptr);
+ trace_xen_mmu_set_pgd(ptr, user_ptr, val);
+
/* If page is not pinned, we can just update the entry
directly */
if (!xen_page_pinned(ptr)) {
@@ -788,14 +828,12 @@ static void xen_pte_unlock(void *v)
static void xen_do_pin(unsigned level, unsigned long pfn)
{
- struct mmuext_op *op;
- struct multicall_space mcs;
+ struct mmuext_op op;
- mcs = __xen_mc_entry(sizeof(*op));
- op = mcs.args;
- op->cmd = level;
- op->arg1.mfn = pfn_to_mfn(pfn);
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+ op.cmd = level;
+ op.arg1.mfn = pfn_to_mfn(pfn);
+
+ xen_extend_mmuext_op(&op);
}
static int xen_pin_page(struct mm_struct *mm, struct page *page,
@@ -863,6 +901,8 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
+ trace_xen_mmu_pgd_pin(mm, pgd);
+
xen_mc_batch();
if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -988,6 +1028,8 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
+ trace_xen_mmu_pgd_unpin(mm, pgd);
+
xen_mc_batch();
xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1196,6 +1238,8 @@ static void xen_flush_tlb(void)
struct mmuext_op *op;
struct multicall_space mcs;
+ trace_xen_mmu_flush_tlb(0);
+
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
@@ -1214,6 +1258,8 @@ static void xen_flush_tlb_single(unsigned long addr)
struct mmuext_op *op;
struct multicall_space mcs;
+ trace_xen_mmu_flush_tlb_single(addr);
+
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
@@ -1240,6 +1286,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
} *args;
struct multicall_space mcs;
+ trace_xen_mmu_flush_tlb_others(cpus, mm, va);
+
if (cpumask_empty(cpus))
return; /* nothing to do */
@@ -1275,10 +1323,11 @@ static void set_current_cr3(void *v)
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
- struct mmuext_op *op;
- struct multicall_space mcs;
+ struct mmuext_op op;
unsigned long mfn;
+ trace_xen_mmu_write_cr3(kernel, cr3);
+
if (cr3)
mfn = pfn_to_mfn(PFN_DOWN(cr3));
else
@@ -1286,13 +1335,10 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
WARN_ON(mfn == 0 && kernel);
- mcs = __xen_mc_entry(sizeof(*op));
-
- op = mcs.args;
- op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
- op->arg1.mfn = mfn;
+ op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+ op.arg1.mfn = mfn;
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+ xen_extend_mmuext_op(&op);
if (kernel) {
percpu_write(xen_cr3, cr3);
@@ -1451,19 +1497,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+ struct multicall_space mcs;
+ struct mmuext_op *op;
+
+ mcs = __xen_mc_entry(sizeof(*op));
+ op = mcs.args;
+ op->cmd = cmd;
+ op->arg1.mfn = pfn_to_mfn(pfn);
+
+ MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+ struct multicall_space mcs;
+ unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+ mcs = __xen_mc_entry(0);
+ MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
+ pfn_pte(pfn, prot), 0);
+}
+
/* This needs to make sure the new pte page is pinned iff it's being
attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+ unsigned level)
{
- struct page *page = pfn_to_page(pfn);
+ bool pinned = PagePinned(virt_to_page(mm->pgd));
+
+ trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
+
+ if (pinned) {
+ struct page *page = pfn_to_page(pfn);
- if (PagePinned(virt_to_page(mm->pgd))) {
SetPagePinned(page);
if (!PageHighMem(page)) {
- make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+ xen_mc_batch();
+
+ __set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
if (level == PT_PTE && USE_SPLIT_PTLOCKS)
- pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+ __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
} else {
/* make sure there are no stray mappings of
this page */
@@ -1483,15 +1562,23 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
}
/* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
struct page *page = pfn_to_page(pfn);
+ bool pinned = PagePinned(page);
- if (PagePinned(page)) {
+ trace_xen_mmu_release_ptpage(pfn, level, pinned);
+
+ if (pinned) {
if (!PageHighMem(page)) {
+ xen_mc_batch();
+
if (level == PT_PTE && USE_SPLIT_PTLOCKS)
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
- make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+ __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+ __set_pfn_prot(pfn, PAGE_KERNEL);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
ClearPagePinned(page);
}
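The new xen_extend_mmuext_op() lets callers such as xen_do_pin() and __xen_write_cr3() append another mmuext_op to the previous multicall entry, bumping its count argument, instead of consuming a fresh slot every time. The userspace sketch below models only that coalescing decision; the names and op values are invented and it does not touch the real Xen multicall interfaces.

#include <stdio.h>

/* Userspace sketch of the coalescing idea behind xen_extend_mmuext_op():
 * consecutive sub-ops that belong to the same pending hypercall are
 * appended to the previous batch entry (its count is bumped) instead of
 * taking a new multicall slot. */

#define MAX_ENTRIES 8

struct entry {
	int op;		/* which hypercall this slot issues */
	int count;	/* how many sub-ops it carries */
};

static struct entry batch[MAX_ENTRIES];
static int nentries;

static void extend_or_add(int op)
{
	if (nentries && batch[nentries - 1].op == op) {
		batch[nentries - 1].count++;	/* extend the previous slot */
		return;
	}
	if (nentries == MAX_ENTRIES)
		return;				/* the real code flushes here */
	batch[nentries].op = op;		/* otherwise start a new slot */
	batch[nentries].count = 1;
	nentries++;
}

int main(void)
{
	int i;

	/* three pin-style requests in a row collapse into one batch entry */
	extend_or_add(1);
	extend_or_add(1);
	extend_or_add(1);
	extend_or_add(2);	/* a different op starts a new entry */

	for (i = 0; i < nentries; i++)
		printf("slot %d: op=%d count=%d\n", i, batch[i].op, batch[i].count);
	return 0;
}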
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 1b2b73ff0a6e..0d82003e76ad 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -30,12 +30,13 @@
#define MC_BATCH 32
-#define MC_DEBUG 1
+#define MC_DEBUG 0
#define MC_ARGS (MC_BATCH * 16)
struct mc_buffer {
+ unsigned mcidx, argidx, cbidx;
struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
struct multicall_entry debug[MC_BATCH];
@@ -46,85 +47,15 @@ struct mc_buffer {
void (*fn)(void *);
void *data;
} callbacks[MC_BATCH];
- unsigned mcidx, argidx, cbidx;
};
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
-/* flush reasons 0- slots, 1- args, 2- callbacks */
-enum flush_reasons
-{
- FL_SLOTS,
- FL_ARGS,
- FL_CALLBACKS,
-
- FL_N_REASONS
-};
-
-#ifdef CONFIG_XEN_DEBUG_FS
-#define NHYPERCALLS 40 /* not really */
-
-static struct {
- unsigned histo[MC_BATCH+1];
-
- unsigned issued;
- unsigned arg_total;
- unsigned hypercalls;
- unsigned histo_hypercalls[NHYPERCALLS];
-
- unsigned flush[FL_N_REASONS];
-} mc_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
- if (unlikely(zero_stats)) {
- memset(&mc_stats, 0, sizeof(mc_stats));
- zero_stats = 0;
- }
-}
-
-static void mc_add_stats(const struct mc_buffer *mc)
-{
- int i;
-
- check_zero();
-
- mc_stats.issued++;
- mc_stats.hypercalls += mc->mcidx;
- mc_stats.arg_total += mc->argidx;
-
- mc_stats.histo[mc->mcidx]++;
- for(i = 0; i < mc->mcidx; i++) {
- unsigned op = mc->entries[i].op;
- if (op < NHYPERCALLS)
- mc_stats.histo_hypercalls[op]++;
- }
-}
-
-static void mc_stats_flush(enum flush_reasons idx)
-{
- check_zero();
-
- mc_stats.flush[idx]++;
-}
-
-#else /* !CONFIG_XEN_DEBUG_FS */
-
-static inline void mc_add_stats(const struct mc_buffer *mc)
-{
-}
-
-static inline void mc_stats_flush(enum flush_reasons idx)
-{
-}
-#endif /* CONFIG_XEN_DEBUG_FS */
-
void xen_mc_flush(void)
{
struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct multicall_entry *mc;
int ret = 0;
unsigned long flags;
int i;
@@ -135,9 +66,26 @@ void xen_mc_flush(void)
something in the middle */
local_irq_save(flags);
- mc_add_stats(b);
+ trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
+
+ switch (b->mcidx) {
+ case 0:
+ /* no-op */
+ BUG_ON(b->argidx != 0);
+ break;
+
+ case 1:
+ /* Singleton multicall - bypass multicall machinery
+ and just do the call directly. */
+ mc = &b->entries[0];
+
+ mc->result = privcmd_call(mc->op,
+ mc->args[0], mc->args[1], mc->args[2],
+ mc->args[3], mc->args[4]);
+ ret = mc->result < 0;
+ break;
- if (b->mcidx) {
+ default:
#if MC_DEBUG
memcpy(b->debug, b->entries,
b->mcidx * sizeof(struct multicall_entry));
@@ -164,11 +112,10 @@ void xen_mc_flush(void)
}
}
#endif
+ }
- b->mcidx = 0;
- b->argidx = 0;
- } else
- BUG_ON(b->argidx != 0);
+ b->mcidx = 0;
+ b->argidx = 0;
for (i = 0; i < b->cbidx; i++) {
struct callback *cb = &b->callbacks[i];
@@ -188,18 +135,21 @@ struct multicall_space __xen_mc_entry(size_t args)
struct multicall_space ret;
unsigned argidx = roundup(b->argidx, sizeof(u64));
+ trace_xen_mc_entry_alloc(args);
+
BUG_ON(preemptible());
BUG_ON(b->argidx >= MC_ARGS);
- if (b->mcidx == MC_BATCH ||
- (argidx + args) >= MC_ARGS) {
- mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
+ if (unlikely(b->mcidx == MC_BATCH ||
+ (argidx + args) >= MC_ARGS)) {
+ trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
+ XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
xen_mc_flush();
argidx = roundup(b->argidx, sizeof(u64));
}
ret.mc = &b->entries[b->mcidx];
-#ifdef MC_DEBUG
+#if MC_DEBUG
b->caller[b->mcidx] = __builtin_return_address(0);
#endif
b->mcidx++;
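One subtlety in this hunk: with MC_DEBUG now defined as 0, the guard has to be #if MC_DEBUG rather than #ifdef MC_DEBUG, because a macro defined as 0 is still defined. The small standalone program below (illustrative only) demonstrates the difference.

#include <stdio.h>

/* Why the guard above switches from #ifdef to #if: once MC_DEBUG is
 * defined as 0, "#ifdef MC_DEBUG" still takes the block (the macro is
 * defined), while "#if MC_DEBUG" correctly compiles it out. */

#define MC_DEBUG 0

int main(void)
{
#ifdef MC_DEBUG
	puts("#ifdef MC_DEBUG: still taken, even though MC_DEBUG is 0");
#endif
#if MC_DEBUG
	puts("#if MC_DEBUG: never printed while MC_DEBUG is 0");
#endif
	return 0;
}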
@@ -218,20 +168,25 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
BUG_ON(preemptible());
BUG_ON(b->argidx >= MC_ARGS);
- if (b->mcidx == 0)
- return ret;
-
- if (b->entries[b->mcidx - 1].op != op)
- return ret;
+ if (unlikely(b->mcidx == 0 ||
+ b->entries[b->mcidx - 1].op != op)) {
+ trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
+ goto out;
+ }
- if ((b->argidx + size) >= MC_ARGS)
- return ret;
+ if (unlikely((b->argidx + size) >= MC_ARGS)) {
+ trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
+ goto out;
+ }
ret.mc = &b->entries[b->mcidx - 1];
ret.args = &b->args[b->argidx];
b->argidx += size;
BUG_ON(b->argidx >= MC_ARGS);
+
+ trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
+out:
return ret;
}
@@ -241,43 +196,13 @@ void xen_mc_callback(void (*fn)(void *), void *data)
struct callback *cb;
if (b->cbidx == MC_BATCH) {
- mc_stats_flush(FL_CALLBACKS);
+ trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK);
xen_mc_flush();
}
+ trace_xen_mc_callback(fn, data);
+
cb = &b->callbacks[b->cbidx++];
cb->fn = fn;
cb->data = data;
}
-
-#ifdef CONFIG_XEN_DEBUG_FS
-
-static struct dentry *d_mc_debug;
-
-static int __init xen_mc_debugfs(void)
-{
- struct dentry *d_xen = xen_init_debugfs();
-
- if (d_xen == NULL)
- return -ENOMEM;
-
- d_mc_debug = debugfs_create_dir("multicalls", d_xen);
-
- debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);
-
- debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
- debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
- debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);
-
- xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
- mc_stats.histo, MC_BATCH);
- xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
- mc_stats.histo_hypercalls, NHYPERCALLS);
- xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
- mc_stats.flush, FL_N_REASONS);
-
- return 0;
-}
-fs_initcall(xen_mc_debugfs);
-
-#endif /* CONFIG_XEN_DEBUG_FS */
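The rewritten xen_mc_flush() above distinguishes three cases: an empty batch is a no-op, a single queued entry bypasses the multicall machinery and is issued directly via privcmd_call(), and only larger batches go through the real multicall hypercall; the debugfs statistics are dropped in favour of the trace_xen_mc_* events. The sketch below (userspace, invented names, not the real flush path) models just that three-way dispatch.

#include <stdio.h>

/* Userspace model of the flush-time dispatch added above. */

enum issue_path { ISSUE_NONE, ISSUE_DIRECT, ISSUE_MULTICALL };

static enum issue_path pick_issue_path(unsigned mcidx)
{
	switch (mcidx) {
	case 0:
		return ISSUE_NONE;		/* nothing queued */
	case 1:
		return ISSUE_DIRECT;		/* one direct hypercall */
	default:
		return ISSUE_MULTICALL;		/* batch via multicall */
	}
}

int main(void)
{
	static const char *name[] = { "none", "direct", "multicall" };
	unsigned n;

	for (n = 0; n <= 3; n++)
		printf("%u queued -> issue %s\n", n, name[pick_issue_path(n)]);
	return 0;
}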
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 4ec8035e3216..dee79b78a90f 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -1,6 +1,8 @@
#ifndef _XEN_MULTICALLS_H
#define _XEN_MULTICALLS_H
+#include <trace/events/xen.h>
+
#include "xen-ops.h"
/* Multicalls */
@@ -20,8 +22,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
static inline void xen_mc_batch(void)
{
unsigned long flags;
+
/* need to disable interrupts until this entry is complete */
local_irq_save(flags);
+ trace_xen_mc_batch(paravirt_get_lazy_mode());
__this_cpu_write(xen_mc_irq_flags, flags);
}
@@ -37,6 +41,8 @@ void xen_mc_flush(void);
/* Issue a multicall if we're not in a lazy mode */
static inline void xen_mc_issue(unsigned mode)
{
+ trace_xen_mc_issue(mode);
+
if ((paravirt_get_lazy_mode() & mode) == 0)
xen_mc_flush();
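xen_mc_issue() only flushes the queued batch when the CPU is not already in the matching lazy mode; inside a lazy section the flush is deferred until the mode ends. The userspace sketch below shows just that bitmask check; the LAZY_* values are invented, not the kernel's PARAVIRT_LAZY_* constants.

#include <stdio.h>
#include <stdbool.h>

/* Sketch of the xen_mc_issue() decision: flush now only when the current
 * lazy mode does not cover the issue mode. */

#define LAZY_NONE 0x0
#define LAZY_MMU  0x1
#define LAZY_CPU  0x2

static bool flush_now(unsigned lazy_mode, unsigned issue_mode)
{
	return (lazy_mode & issue_mode) == 0;
}

int main(void)
{
	printf("not lazy, MMU issue -> flush=%d\n", flush_now(LAZY_NONE, LAZY_MMU));
	printf("lazy MMU, MMU issue -> flush=%d\n", flush_now(LAZY_MMU, LAZY_MMU));
	printf("lazy MMU, CPU issue -> flush=%d\n", flush_now(LAZY_MMU, LAZY_CPU));
	return 0;
}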
diff --git a/arch/x86/xen/trace.c b/arch/x86/xen/trace.c
new file mode 100644
index 000000000000..734beba2a08c
--- /dev/null
+++ b/arch/x86/xen/trace.c
@@ -0,0 +1,61 @@
+#include <linux/ftrace.h>
+
+#define N(x) [__HYPERVISOR_##x] = "("#x")"
+static const char *xen_hypercall_names[] = {
+ N(set_trap_table),
+ N(mmu_update),
+ N(set_gdt),
+ N(stack_switch),
+ N(set_callbacks),
+ N(fpu_taskswitch),
+ N(sched_op_compat),
+ N(dom0_op),
+ N(set_debugreg),
+ N(get_debugreg),
+ N(update_descriptor),
+ N(memory_op),
+ N(multicall),
+ N(update_va_mapping),
+ N(set_timer_op),
+ N(event_channel_op_compat),
+ N(xen_version),
+ N(console_io),
+ N(physdev_op_compat),
+ N(grant_table_op),
+ N(vm_assist),
+ N(update_va_mapping_otherdomain),
+ N(iret),
+ N(vcpu_op),
+ N(set_segment_base),
+ N(mmuext_op),
+ N(acm_op),
+ N(nmi_op),
+ N(sched_op),
+ N(callback_op),
+ N(xenoprof_op),
+ N(event_channel_op),
+ N(physdev_op),
+ N(hvm_op),
+
+/* Architecture-specific hypercall definitions. */
+ N(arch_0),
+ N(arch_1),
+ N(arch_2),
+ N(arch_3),
+ N(arch_4),
+ N(arch_5),
+ N(arch_6),
+ N(arch_7),
+};
+#undef N
+
+static const char *xen_hypercall_name(unsigned op)
+{
+ if (op < ARRAY_SIZE(xen_hypercall_names) && xen_hypercall_names[op] != NULL)
+ return xen_hypercall_names[op];
+
+ return "";
+}
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/xen.h>
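xen_hypercall_name() relies on two properties of the designated-initializer table above: slots that are never named stay NULL, and an out-of-range op must not be dereferenced. The standalone sketch below (invented opcodes, not the Xen numbering) exercises the same bounds-plus-NULL check and shows why unknown ops fall back to an empty string.

#include <stdio.h>

/* Sketch of the lookup pattern used by xen_hypercall_name():
 * designated initializers leave unnamed slots NULL, so the lookup must
 * check both the array bound and the NULL hole. */

#define OP_FIRST 0
#define OP_THIRD 2

static const char *names[] = {
	[OP_FIRST] = "(first)",
	[OP_THIRD] = "(third)",	/* index 1 is intentionally left NULL */
};

static const char *op_name(unsigned op)
{
	if (op < sizeof(names) / sizeof(names[0]) && names[op] != NULL)
		return names[op];
	return "";
}

int main(void)
{
	printf("0 -> %s, 1 -> '%s', 2 -> %s, 9 -> '%s'\n",
	       op_name(0), op_name(1), op_name(2), op_name(9));
	return 0;
}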
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index c1accea8cb56..451dda928c93 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -24,26 +24,6 @@
#undef DEBUG_RELOCATE
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc_exec(size);
-}
-
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
-}
-
-int module_frob_arch_sections(Elf32_Ehdr *hdr,
- Elf32_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
static int
decode_calln_opcode (unsigned char *location)
{
@@ -66,18 +46,6 @@ decode_l32r_opcode (unsigned char *location)
#endif
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *mod)
-{
- printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
- mod->name);
- return -ENOEXEC;
-
-}
-
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -222,14 +190,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
return 0;
}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *mod)
-{
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
-}
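These xtensa stubs can be deleted presumably because the generic module loader now supplies default implementations for hooks an architecture does not override (introduced elsewhere in this series). The snippet below is only a userspace illustration of the weak-symbol mechanism such defaults typically rely on; arch_hook() is an invented name, not a kernel API.

#include <stdio.h>

/* Illustration only (GCC/Clang weak symbols), not the kernel's module
 * loader: a weak default is used unless a strong definition elsewhere
 * overrides it, which is why per-arch do-nothing stubs become redundant
 * once the generic layer provides such defaults. */
__attribute__((weak)) int arch_hook(void)
{
	return 0;	/* do-nothing default */
}

int main(void)
{
	/* no strong override is linked in, so the weak default runs */
	printf("arch_hook() = %d\n", arch_hook());
	return 0;
}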