| author | 2013-07-12 12:34:42 +0200 |
|---|---|
| committer | 2013-07-12 12:34:42 +0200 |
| commit | f2006e27396f55276f24434f56e208d86e7f9908 (patch) |
| tree | 71896db916d33888b4286f80117d3cac0da40e6d /arch/arm |
| parent | Merge branch 'timers/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/urgent (diff) |
| parent | Merge tag 'dm-3.11-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm (diff) |
| download | wireguard-linux-f2006e27396f55276f24434f56e208d86e7f9908.tar.xz, wireguard-linux-f2006e27396f55276f24434f56e208d86e7f9908.zip |
Merge branch 'linus' into timers/urgent
Get upstream changes so we can apply fixes against them
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/arm')
384 files changed, 8166 insertions, 1317 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index de7049bdea85..0ac9be677ebb 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -14,6 +14,7 @@ config ARM select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_PCI_IOMAP + select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_IDLE_POLL_SETUP select GENERIC_STRNCPY_FROM_USER @@ -40,6 +41,7 @@ config ARM select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZ4 select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZO select HAVE_KERNEL_XZ @@ -175,6 +177,9 @@ config ARCH_HAS_CPUFREQ and that the relevant menu configurations are displayed for it. +config ARCH_HAS_BANDGAP + bool + config GENERIC_HWEIGHT bool default y @@ -1450,7 +1455,7 @@ config SMP depends on CPU_V6K || CPU_V7 depends on GENERIC_CLOCKEVENTS depends on HAVE_SMP - depends on MMU + depends on MMU || ARM_MPU select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have @@ -1471,7 +1476,7 @@ config SMP config SMP_ON_UP bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)" - depends on SMP && !XIP_KERNEL + depends on SMP && !XIP_KERNEL && MMU default y help SMP kernels contain instructions which fail on non-SMP processors. @@ -1744,6 +1749,14 @@ config HW_PERF_EVENTS Enable hardware performance counter support for perf events. If disabled, perf events will use software events only. +config SYS_SUPPORTS_HUGETLBFS + def_bool y + depends on ARM_LPAE + +config HAVE_ARCH_TRANSPARENT_HUGEPAGE + def_bool y + depends on ARM_LPAE + source "mm/Kconfig" config FORCE_MAX_ZONEORDER diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu index c859495da480..aed66d5df7f1 100644 --- a/arch/arm/Kconfig-nommu +++ b/arch/arm/Kconfig-nommu @@ -50,3 +50,15 @@ config REMAP_VECTORS_TO_RAM Otherwise, say 'y' here. In this case, the kernel will require external support to redirect the hardware exception vectors to the writable versions located at DRAM_BASE. + +config ARM_MPU + bool 'Use the ARM v7 PMSA Compliant MPU' + depends on CPU_V7 + default y + help + Some ARM systems without an MMU have instead a Memory Protection + Unit (MPU) that defines the type and permissions for regions of + memory. + + If your CPU has an MPU then you should choose 'y' here unless you + know that you do not want to use the MPU. diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 77c1411c9a91..5b7be8d975b5 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -550,6 +550,13 @@ choice of the tiles using the RS1 memory map, including all new A-class core tiles, FPGA-based SMMs and software models. + config DEBUG_VEXPRESS_UART0_CRX + bool "Use PL011 UART0 at 0xb0090000 (Cortex-R compliant tiles)" + depends on ARCH_VEXPRESS && !MMU + help + This option selects UART0 at 0xb0090000. 
This is appropriate for + Cortex-R series tiles and SMMs, such as Cortex-R5 and Cortex-R7 + config DEBUG_VT8500_UART0 bool "Use UART0 on VIA/Wondermedia SoCs" depends on ARCH_VT8500 @@ -789,7 +796,8 @@ config DEBUG_LL_INCLUDE default "debug/u300.S" if DEBUG_U300_UART default "debug/ux500.S" if DEBUG_UX500_UART default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT || \ - DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1 + DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1 || \ + DEBUG_VEXPRESS_UART0_CRX default "debug/vt8500.S" if DEBUG_VT8500_UART0 default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1 default "mach/debug-macro.S" diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 72caf82a8280..c0ac0f5e5e5c 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -59,38 +59,44 @@ comma = , # Note that GCC does not numerically define an architecture version # macro, but instead defines a whole series of macros which makes # testing for a specific architecture or later rather impossible. -arch-$(CONFIG_CPU_32v7M) :=-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m -arch-$(CONFIG_CPU_32v7) :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) -arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) +arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m +arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) +arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) # Only override the compiler option if ARMv6. The ARMv6K extensions are # always available in ARMv7 ifeq ($(CONFIG_CPU_32v6),y) -arch-$(CONFIG_CPU_32v6K) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) +arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) endif -arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) -arch-$(CONFIG_CPU_32v4T) :=-D__LINUX_ARM_ARCH__=4 -march=armv4t -arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4 -arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3 +arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) +arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t +arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4 +arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3 + +# Evaluate arch cc-option calls now +arch-y := $(arch-y) # This selects how we optimise for the processor. 
-tune-$(CONFIG_CPU_ARM7TDMI) :=-mtune=arm7tdmi -tune-$(CONFIG_CPU_ARM720T) :=-mtune=arm7tdmi -tune-$(CONFIG_CPU_ARM740T) :=-mtune=arm7tdmi -tune-$(CONFIG_CPU_ARM9TDMI) :=-mtune=arm9tdmi -tune-$(CONFIG_CPU_ARM940T) :=-mtune=arm9tdmi -tune-$(CONFIG_CPU_ARM946E) :=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi) -tune-$(CONFIG_CPU_ARM920T) :=-mtune=arm9tdmi -tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi -tune-$(CONFIG_CPU_ARM925T) :=-mtune=arm9tdmi -tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi -tune-$(CONFIG_CPU_FA526) :=-mtune=arm9tdmi -tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110 -tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100 -tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale -tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale -tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale) -tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) -tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) +tune-$(CONFIG_CPU_ARM7TDMI) =-mtune=arm7tdmi +tune-$(CONFIG_CPU_ARM720T) =-mtune=arm7tdmi +tune-$(CONFIG_CPU_ARM740T) =-mtune=arm7tdmi +tune-$(CONFIG_CPU_ARM9TDMI) =-mtune=arm9tdmi +tune-$(CONFIG_CPU_ARM940T) =-mtune=arm9tdmi +tune-$(CONFIG_CPU_ARM946E) =$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi) +tune-$(CONFIG_CPU_ARM920T) =-mtune=arm9tdmi +tune-$(CONFIG_CPU_ARM922T) =-mtune=arm9tdmi +tune-$(CONFIG_CPU_ARM925T) =-mtune=arm9tdmi +tune-$(CONFIG_CPU_ARM926T) =-mtune=arm9tdmi +tune-$(CONFIG_CPU_FA526) =-mtune=arm9tdmi +tune-$(CONFIG_CPU_SA110) =-mtune=strongarm110 +tune-$(CONFIG_CPU_SA1100) =-mtune=strongarm1100 +tune-$(CONFIG_CPU_XSCALE) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale +tune-$(CONFIG_CPU_XSC3) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale +tune-$(CONFIG_CPU_FEROCEON) =$(call cc-option,-mtune=marvell-f,-mtune=xscale) +tune-$(CONFIG_CPU_V6) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) +tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) + +# Evaluate tune cc-option calls now +tune-y := $(tune-y) ifeq ($(CONFIG_AEABI),y) CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork @@ -295,9 +301,10 @@ zImage Image xipImage bootpImage uImage: vmlinux zinstall uinstall install: vmlinux $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ -%.dtb: scripts +%.dtb: | scripts $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@ +PHONY += dtbs dtbs: scripts $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) dtbs diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index f79a08efe000..47279aa96a6a 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -6,6 +6,7 @@ piggy.gzip piggy.lzo piggy.lzma piggy.xzkern +piggy.lz4 vmlinux vmlinux.lds diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 120b83bfde20..7ac1610252ba 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -27,7 +27,7 @@ OBJS += misc.o decompress.o ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y) OBJS += debug.o endif -FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c +FONTC = $(srctree)/lib/fonts/font_acorn_8x8.c # string library code (-Os is enforced to keep it much smaller) OBJS += string.o @@ -91,6 +91,7 @@ suffix_$(CONFIG_KERNEL_GZIP) = gzip suffix_$(CONFIG_KERNEL_LZO) = lzo suffix_$(CONFIG_KERNEL_LZMA) = lzma suffix_$(CONFIG_KERNEL_XZ) = xzkern 
+suffix_$(CONFIG_KERNEL_LZ4) = lz4 # Borrowed libfdt files for the ATAG compatibility mode @@ -115,7 +116,7 @@ targets := vmlinux vmlinux.lds \ font.o font.c head.o misc.o $(OBJS) # Make sure files are removed during clean -extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ +extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern piggy.lz4 \ lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ hyp-stub.S diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c index aabc02a68482..d1153c8a765a 100644 --- a/arch/arm/boot/compressed/atags_to_fdt.c +++ b/arch/arm/boot/compressed/atags_to_fdt.c @@ -53,6 +53,17 @@ static const void *getprop(const void *fdt, const char *node_path, return fdt_getprop(fdt, offset, property, len); } +static uint32_t get_cell_size(const void *fdt) +{ + int len; + uint32_t cell_size = 1; + const uint32_t *size_len = getprop(fdt, "/", "#size-cells", &len); + + if (size_len) + cell_size = fdt32_to_cpu(*size_len); + return cell_size; +} + static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) { char cmdline[COMMAND_LINE_SIZE]; @@ -95,9 +106,11 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) int atags_to_fdt(void *atag_list, void *fdt, int total_space) { struct tag *atag = atag_list; - uint32_t mem_reg_property[2 * NR_BANKS]; + /* In the case of 64 bits memory size, need to reserve 2 cells for + * address and size for each bank */ + uint32_t mem_reg_property[2 * 2 * NR_BANKS]; int memcount = 0; - int ret; + int ret, memsize; /* make sure we've got an aligned pointer */ if ((u32)atag_list & 0x3) @@ -137,8 +150,25 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) continue; if (!atag->u.mem.size) continue; - mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start); - mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size); + memsize = get_cell_size(fdt); + + if (memsize == 2) { + /* if memsize is 2, that means that + * each data needs 2 cells of 32 bits, + * so the data are 64 bits */ + uint64_t *mem_reg_prop64 = + (uint64_t *)mem_reg_property; + mem_reg_prop64[memcount++] = + cpu_to_fdt64(atag->u.mem.start); + mem_reg_prop64[memcount++] = + cpu_to_fdt64(atag->u.mem.size); + } else { + mem_reg_property[memcount++] = + cpu_to_fdt32(atag->u.mem.start); + mem_reg_property[memcount++] = + cpu_to_fdt32(atag->u.mem.size); + } + } else if (atag->hdr.tag == ATAG_INITRD2) { uint32_t initrd_start, initrd_size; initrd_start = atag->u.initrd.start; @@ -150,8 +180,10 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) } } - if (memcount) - setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount); + if (memcount) { + setprop(fdt, "/memory", "reg", mem_reg_property, + 4 * memcount * memsize); + } return fdt_pack(fdt); } diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index 24b0475cb8bf..bd245d34952d 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c @@ -51,6 +51,10 @@ extern char * strstr(const char * s1, const char *s2); #include "../../../../lib/decompress_unxz.c" #endif +#ifdef CONFIG_KERNEL_LZ4 +#include "../../../../lib/decompress_unlz4.c" +#endif + int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) { return decompress(input, len, NULL, NULL, output, NULL, error); diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 032a8d987148..75189f13cf54 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ 
-142,7 +142,6 @@ start: mov r7, r1 @ save architecture ID mov r8, r2 @ save atags pointer -#ifndef __ARM_ARCH_2__ /* * Booting from Angel - need to enter SVC mode and disable * FIQs/IRQs (numeric definitions from angel arm.h source). @@ -158,10 +157,6 @@ not_angel: safe_svcmode_maskall r0 msr spsr_cxsf, r9 @ Save the CPU boot mode in @ SPSR -#else - teqp pc, #0x0c000003 @ turn off interrupts -#endif - /* * Note that some cache flushing and other stuff may * be needed here - is there an Angel SWI call for this? @@ -183,7 +178,19 @@ not_angel: ldr r4, =zreladdr #endif - bl cache_on + /* + * Set up a page table only if it won't overwrite ourself. + * That means r4 < pc && r4 - 16k page directory > &_end. + * Given that r4 > &_end is most unfrequent, we add a rough + * additional 1MB of room for a possible appended DTB. + */ + mov r0, pc + cmp r0, r4 + ldrcc r0, LC0+32 + addcc r0, r0, pc + cmpcc r4, r0 + orrcc r4, r4, #1 @ remember we skipped cache_on + blcs cache_on restart: adr r0, LC0 ldmia r0, {r1, r2, r3, r6, r10, r11, r12} @@ -229,7 +236,7 @@ restart: adr r0, LC0 * r0 = delta * r2 = BSS start * r3 = BSS end - * r4 = final kernel address + * r4 = final kernel address (possibly with LSB set) * r5 = appended dtb size (still unknown) * r6 = _edata * r7 = architecture ID @@ -277,6 +284,7 @@ restart: adr r0, LC0 */ cmp r0, #1 sub r0, r4, #TEXT_OFFSET + bic r0, r0, #1 add r0, r0, #0x100 mov r1, r6 sub r2, sp, r6 @@ -323,12 +331,13 @@ dtb_check_done: /* * Check to see if we will overwrite ourselves. - * r4 = final kernel address + * r4 = final kernel address (possibly with LSB set) * r9 = size of decompressed image * r10 = end of this image, including bss/stack/malloc space if non XIP * We basically want: * r4 - 16k page directory >= r10 -> OK * r4 + image length <= address of wont_overwrite -> OK + * Note: the possible LSB in r4 is harmless here. */ add r10, r10, #16384 cmp r4, r10 @@ -390,7 +399,8 @@ dtb_check_done: add sp, sp, r6 #endif - bl cache_clean_flush + tst r4, #1 + bleq cache_clean_flush adr r0, BSYM(restart) add r0, r0, r6 @@ -402,7 +412,7 @@ wont_overwrite: * r0 = delta * r2 = BSS start * r3 = BSS end - * r4 = kernel execution address + * r4 = kernel execution address (possibly with LSB set) * r5 = appended dtb size (0 if not present) * r7 = architecture ID * r8 = atags pointer @@ -465,6 +475,15 @@ not_relocated: mov r0, #0 cmp r2, r3 blo 1b + /* + * Did we skip the cache setup earlier? + * That is indicated by the LSB in r4. + * Do it now if so. + */ + tst r4, #1 + bic r4, r4, #1 + blne cache_on + /* * The C runtime environment should now be setup sufficiently. * Set up some pointers, and start decompressing. @@ -513,6 +532,7 @@ LC0: .word LC0 @ r1 .word _got_start @ r11 .word _got_end @ ip .word .L_user_stack_end @ sp + .word _end - restart + 16384 + 1024*1024 .size LC0, . 
- LC0 #ifdef CONFIG_ARCH_RPC diff --git a/arch/arm/boot/compressed/piggy.lz4.S b/arch/arm/boot/compressed/piggy.lz4.S new file mode 100644 index 000000000000..3d9a575618a3 --- /dev/null +++ b/arch/arm/boot/compressed/piggy.lz4.S @@ -0,0 +1,6 @@ + .section .piggydata,#alloc + .globl input_data +input_data: + .incbin "arch/arm/boot/compressed/piggy.lz4" + .globl input_data_end +input_data_end: diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts index 04feaf8f1420..444b4ede0d60 100644 --- a/arch/arm/boot/dts/am335x-bone.dts +++ b/arch/arm/boot/dts/am335x-bone.dts @@ -214,10 +214,12 @@ &cpsw_emac0 { phy_id = <&davinci_mdio>, <0>; + phy-mode = "mii"; }; &cpsw_emac1 { phy_id = <&davinci_mdio>, <1>; + phy-mode = "mii"; }; &mac { diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index a16bb9691cc6..3aee1a43782d 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -467,8 +467,24 @@ &cpsw_emac0 { phy_id = <&davinci_mdio>, <0>; + phy-mode = "rgmii-txid"; }; &cpsw_emac1 { phy_id = <&davinci_mdio>, <1>; + phy-mode = "rgmii-txid"; +}; + +&tscadc { + status = "okay"; + tsc { + ti,wires = <4>; + ti,x-plate-resistance = <200>; + ti,coordiante-readouts = <5>; + ti,wire-config = <0x00 0x11 0x22 0x33>; + }; + + adc { + ti,adc-channels = <4 5 6 7>; + }; }; diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 9e00eef9b74b..0c8ad173d2b0 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -392,3 +392,13 @@ pinctrl-0 = <&davinci_mdio_default>; pinctrl-1 = <&davinci_mdio_sleep>; }; + +&cpsw_emac0 { + phy_id = <&davinci_mdio>, <0>; + phy-mode = "rgmii-txid"; +}; + +&cpsw_emac1 { + phy_id = <&davinci_mdio>, <1>; + phy-mode = "rgmii-txid"; +}; diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 0d4df90477f7..38b446ba1ce1 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -502,6 +502,23 @@ status = "disabled"; }; + tscadc: tscadc@44e0d000 { + compatible = "ti,am3359-tscadc"; + reg = <0x44e0d000 0x1000>; + interrupt-parent = <&intc>; + interrupts = <16>; + ti,hwmods = "adc_tsc"; + status = "disabled"; + + tsc { + compatible = "ti,am3359-tsc"; + }; + am335x_adc: adc { + #io-channel-cells = <1>; + compatible = "ti,am3359-adc"; + }; + }; + gpmc: gpmc@50000000 { compatible = "ti,am3352-gpmc"; ti,hwmods = "gpmc"; diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi index 7d1a27949c13..9866cd736dee 100644 --- a/arch/arm/boot/dts/atlas6.dtsi +++ b/arch/arm/boot/dts/atlas6.dtsi @@ -613,7 +613,7 @@ }; rtc-iobg { - compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus"; + compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus"; #address-cells = <1>; #size-cells = <1>; reg = <0x80030000 0x10000>; diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi index 17979d5f23b4..c0cdf66f8964 100644 --- a/arch/arm/boot/dts/bcm11351.dtsi +++ b/arch/arm/boot/dts/bcm11351.dtsi @@ -50,10 +50,10 @@ }; L2: l2-cache { - compatible = "arm,pl310-cache"; - reg = <0x3ff20000 0x1000>; - cache-unified; - cache-level = <2>; + compatible = "bcm,bcm11351-a2-pl310-cache"; + reg = <0x3ff20000 0x1000>; + cache-unified; + cache-level = <2>; }; timer@35006000 { diff --git a/arch/arm/boot/dts/cros5250-common.dtsi b/arch/arm/boot/dts/cros5250-common.dtsi index 3f0239ec1bc5..dc259e8b8a73 100644 --- a/arch/arm/boot/dts/cros5250-common.dtsi +++ 
b/arch/arm/boot/dts/cros5250-common.dtsi @@ -190,7 +190,7 @@ samsung,i2c-max-bus-freq = <66000>; hdmiddc@50 { - compatible = "samsung,exynos5-hdmiddc"; + compatible = "samsung,exynos4210-hdmiddc"; reg = <0x50>; }; }; @@ -224,7 +224,7 @@ samsung,i2c-max-bus-freq = <378000>; hdmiphy@38 { - compatible = "samsung,exynos5-hdmiphy"; + compatible = "samsung,exynos4212-hdmiphy"; reg = <0x38>; }; }; diff --git a/arch/arm/boot/dts/ecx-common.dtsi b/arch/arm/boot/dts/ecx-common.dtsi index d61b535f682a..e8559b753c9d 100644 --- a/arch/arm/boot/dts/ecx-common.dtsi +++ b/arch/arm/boot/dts/ecx-common.dtsi @@ -33,6 +33,8 @@ calxeda,port-phys = <&combophy5 0 &combophy0 0 &combophy0 1 &combophy0 2 &combophy0 3>; + calxeda,sgpio-gpio =<&gpioh 5 1 &gpioh 6 1 &gpioh 7 1>; + calxeda,led-order = <4 0 1 2 3>; }; sdhci@ffe0e000 { diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 35a66dee4011..49f18c24a576 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -105,7 +105,7 @@ samsung,i2c-max-bus-freq = <66000>; hdmiddc@50 { - compatible = "samsung,exynos5-hdmiddc"; + compatible = "samsung,exynos4210-hdmiddc"; reg = <0x50>; }; }; @@ -135,7 +135,7 @@ samsung,i2c-max-bus-freq = <66000>; hdmiphy@38 { - compatible = "samsung,exynos5-hdmiphy"; + compatible = "samsung,exynos4212-hdmiphy"; reg = <0x38>; }; }; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 41cd625b6020..ef57277fc38f 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -599,7 +599,7 @@ }; hdmi { - compatible = "samsung,exynos5-hdmi"; + compatible = "samsung,exynos4212-hdmi"; reg = <0x14530000 0x70000>; interrupts = <0 95 0>; clocks = <&clock 333>, <&clock 136>, <&clock 137>, @@ -609,7 +609,7 @@ }; mixer { - compatible = "samsung,exynos5-mixer"; + compatible = "samsung,exynos5250-mixer"; reg = <0x14450000 0x10000>; interrupts = <0 94 0>; }; diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts index 3637bf3b1d59..1f0d38d7b16f 100644 --- a/arch/arm/boot/dts/imx28-evk.dts +++ b/arch/arm/boot/dts/imx28-evk.dts @@ -155,12 +155,14 @@ can0: can@80032000 { pinctrl-names = "default"; pinctrl-0 = <&can0_pins_a>; + xceiver-supply = <®_can_3v3>; status = "okay"; }; can1: can@80034000 { pinctrl-names = "default"; pinctrl-0 = <&can1_pins_a>; + xceiver-supply = <®_can_3v3>; status = "okay"; }; }; @@ -319,6 +321,16 @@ gpio = <&gpio3 30 0>; enable-active-high; }; + + reg_can_3v3: can-3v3 { + compatible = "regulator-fixed"; + regulator-name = "can-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio2 13 0>; + enable-active-high; + }; + }; sound { diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index 195451bf7706..6a8acb01b1d3 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -736,7 +736,7 @@ dcp@80028000 { reg = <0x80028000 0x2000>; interrupts = <52 53 54>; - status = "disabled"; + compatible = "fsl-dcp"; }; pxp@8002a000 { diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts index af32eff9f4b7..7ef282795dd4 100644 --- a/arch/arm/boot/dts/omap3-devkit8000.dts +++ b/arch/arm/boot/dts/omap3-devkit8000.dts @@ -124,20 +124,21 @@ reg = <0 0 0>; /* CS0, offset 0 */ nand-bus-width = <16>; - gpmc,sync-clk = <0>; - gpmc,cs-on = <0>; - gpmc,cs-rd-off = <44>; - gpmc,cs-wr-off = <44>; - gpmc,adv-on = <6>; - gpmc,adv-rd-off = <34>; - 
gpmc,adv-wr-off = <44>; - gpmc,we-off = <40>; - gpmc,oe-off = <54>; - gpmc,access = <64>; - gpmc,rd-cycle = <82>; - gpmc,wr-cycle = <82>; - gpmc,wr-access = <40>; - gpmc,wr-data-mux-bus = <0>; + gpmc,device-nand; + gpmc,sync-clki-ps = <0>; + gpmc,cs-on-ns = <0>; + gpmc,cs-rd-off-ns = <44>; + gpmc,cs-wr-off-ns = <44>; + gpmc,adv-on-ns = <6>; + gpmc,adv-rd-off-ns = <34>; + gpmc,adv-wr-off-ns = <44>; + gpmc,we-off-ns = <40>; + gpmc,oe-off-ns = <54>; + gpmc,access-ns = <64>; + gpmc,rd-cycle-ns = <82>; + gpmc,wr-cycle-ns = <82>; + gpmc,wr-access-ns = <40>; + gpmc,wr-data-mux-bus-ns = <0>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi index 02edd8965f8a..05e9489cf95c 100644 --- a/arch/arm/boot/dts/prima2.dtsi +++ b/arch/arm/boot/dts/prima2.dtsi @@ -610,7 +610,7 @@ }; rtc-iobg { - compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus"; + compatible = "sirf,prima2-rtciobg", "sirf-prima2-rtciobg-bus", "simple-bus"; #address-cells = <1>; #size-cells = <1>; reg = <0x80030000 0x10000>; diff --git a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts index 0e22a285dfe0..757c4cd900ee 100644 --- a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts +++ b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts @@ -27,6 +27,21 @@ }; soc@01c20000 { + emac: ethernet@01c0b000 { + pinctrl-names = "default"; + pinctrl-0 = <&emac_pins_a>; + phy = <&phy1>; + status = "okay"; + }; + + mdio@01c0b080 { + status = "okay"; + + phy1: ethernet-phy@1 { + reg = <1>; + }; + }; + pinctrl@01c20800 { led_pins_cubieboard: led_pins@0 { allwinner,pins = "PH20", "PH21"; diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts index b9efac100c85..3514b37d66bc 100644 --- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts +++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts @@ -23,10 +23,51 @@ }; soc@01c20000 { + emac: ethernet@01c0b000 { + pinctrl-names = "default"; + pinctrl-0 = <&emac_pins_a>; + phy = <&phy0>; + status = "okay"; + }; + + mdio@01c0b080 { + phy-supply = <®_emac_3v3>; + status = "okay"; + + phy0: ethernet-phy@0 { + reg = <0>; + }; + }; + + pio: pinctrl@01c20800 { + pinctrl-names = "default"; + pinctrl-0 = <&hackberry_hogs>; + + hackberry_hogs: hogs@0 { + allwinner,pins = "PH19"; + allwinner,function = "gpio_out"; + allwinner,drive = <0>; + allwinner,pull = <0>; + }; + }; + uart0: serial@01c28000 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pins_a>; status = "okay"; }; }; + + regulators { + compatible = "simple-bus"; + + reg_emac_3v3: emac-3v3 { + compatible = "regulator-fixed"; + regulator-name = "emac-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + enable-active-high; + gpio = <&pio 7 19 0>; + }; + }; }; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 82e03d22f913..b2bd6e124250 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -167,6 +167,22 @@ reg = <0x01c20000 0x300000>; ranges; + emac: ethernet@01c0b000 { + compatible = "allwinner,sun4i-emac"; + reg = <0x01c0b000 0x1000>; + interrupts = <55>; + clocks = <&ahb_gates 17>; + status = "disabled"; + }; + + mdio@01c0b080 { + compatible = "allwinner,sun4i-mdio"; + reg = <0x01c0b080 0x14>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + intc: interrupt-controller@01c20400 { compatible = "allwinner,sun4i-ic"; reg = <0x01c20400 0x400>; @@ -226,6 +242,17 @@ allwinner,drive = <0>; allwinner,pull = <0>; }; + + 
emac_pins_a: emac0@0 { + allwinner,pins = "PA0", "PA1", "PA2", + "PA3", "PA4", "PA5", "PA6", + "PA7", "PA8", "PA9", "PA10", + "PA11", "PA12", "PA13", "PA14", + "PA15", "PA16"; + allwinner,function = "emac"; + allwinner,drive = <0>; + allwinner,pull = <0>; + }; }; timer@01c20c00 { diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S index 8178705c4b24..80f033614a1f 100644 --- a/arch/arm/common/mcpm_head.S +++ b/arch/arm/common/mcpm_head.S @@ -32,11 +32,11 @@ 1901: adr r0, 1902b bl printascii mov r0, r9 - bl printhex8 + bl printhex2 adr r0, 1903b bl printascii mov r0, r10 - bl printhex8 + bl printhex2 adr r0, 1904b bl printascii #endif diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c index 3caed0db6986..510e5b13aa2e 100644 --- a/arch/arm/common/mcpm_platsmp.c +++ b/arch/arm/common/mcpm_platsmp.c @@ -19,10 +19,6 @@ #include <asm/smp.h> #include <asm/smp_plat.h> -static void __init simple_smp_init_cpus(void) -{ -} - static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) { unsigned int mpidr, pcpu, pcluster, ret; @@ -74,7 +70,6 @@ static void mcpm_cpu_die(unsigned int cpu) #endif static struct smp_operations __initdata mcpm_smp_ops = { - .smp_init_cpus = simple_smp_init_cpus, .smp_boot_secondary = mcpm_boot_secondary, .smp_secondary_init = mcpm_secondary_init, #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c index ddc740769601..023ee63827a2 100644 --- a/arch/arm/common/timer-sp.c +++ b/arch/arm/common/timer-sp.c @@ -28,8 +28,8 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <asm/hardware/arm_timer.h> #include <asm/hardware/timer-sp.h> diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig index e3bf2d65618e..65edf6d47215 100644 --- a/arch/arm/configs/bcm_defconfig +++ b/arch/arm/configs/bcm_defconfig @@ -78,6 +78,13 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_USB_SUPPORT is not set +CONFIG_MMC=y +CONFIG_MMC_UNSAFE_RESUME=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_TEST=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_BCM_KONA=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_TRIGGERS=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 2ac0ffb12f03..a24c02443920 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -286,3 +286,4 @@ CONFIG_SOC_OMAP5=y CONFIG_TI_DAVINCI_MDIO=y CONFIG_TI_DAVINCI_CPDMA=y CONFIG_TI_CPSW=y +CONFIG_AT803X_PHY=y diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 7c1bfc0aea0c..accefe099182 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void) return val; } -static inline u64 arch_counter_get_cntpct(void) -{ - u64 cval; - - isb(); - asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); - return cval; -} - static inline u64 arch_counter_get_cntvct(void) { u64 cval; diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index cedd3721318b..6493802f880a 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -23,6 +23,11 @@ #define CR_RR (1 << 14) /* Round Robin cache replacement */ #define CR_L4 (1 << 15) /* LDR pc can set T bit */ #define CR_DT (1 << 16) +#ifdef CONFIG_MMU +#define CR_HA (1 << 17) 
/* Hardware management of Access Flag */ +#else +#define CR_BR (1 << 17) /* MPU Background region enable (PMSA) */ +#endif #define CR_IT (1 << 18) #define CR_ST (1 << 19) #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index d7deb62554c9..8c25dc4e9851 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -8,6 +8,7 @@ #define CPUID_CACHETYPE 1 #define CPUID_TCM 2 #define CPUID_TLBTYPE 3 +#define CPUID_MPUIR 4 #define CPUID_MPIDR 5 #ifdef CONFIG_CPU_V7M diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index fe92ccf1d0b0..191ada6e4d2d 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -46,7 +46,7 @@ __rem; \ }) -#if __GNUC__ < 4 +#if __GNUC__ < 4 || !defined(CONFIG_AEABI) /* * gcc versions earlier than 4.0 are simply too problematic for the diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h index e6168c0c18e9..74a8b84f3cb1 100644 --- a/arch/arm/include/asm/glue-proc.h +++ b/arch/arm/include/asm/glue-proc.h @@ -230,21 +230,21 @@ # endif #endif -#ifdef CONFIG_CPU_PJ4B +#ifdef CONFIG_CPU_V7M # ifdef CPU_NAME # undef MULTI_CPU # define MULTI_CPU # else -# define CPU_NAME cpu_pj4b +# define CPU_NAME cpu_v7m # endif #endif -#ifdef CONFIG_CPU_V7M +#ifdef CONFIG_CPU_PJ4B # ifdef CPU_NAME # undef MULTI_CPU # define MULTI_CPU # else -# define CPU_NAME cpu_v7m +# define CPU_NAME cpu_pj4b # endif #endif diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index ed94b1a366ae..423744bf18eb 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -223,11 +223,12 @@ extern int iop3xx_get_init_atu(void); #ifndef __ASSEMBLY__ #include <linux/types.h> +#include <linux/reboot.h> void iop3xx_map_io(void); void iop_init_cp6_handler(void); void iop_init_time(unsigned long tickrate); -void iop3xx_restart(char, const char *); +void iop3xx_restart(enum reboot_mode, const char *); static inline u32 read_tmr0(void) { diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h new file mode 100644 index 000000000000..d4014fbe5ea3 --- /dev/null +++ b/arch/arm/include/asm/hugetlb-3level.h @@ -0,0 +1,71 @@ +/* + * arch/arm/include/asm/hugetlb-3level.h + * + * Copyright (C) 2012 ARM Ltd. + * + * Based on arch/x86/include/asm/hugetlb.h. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_ARM_HUGETLB_3LEVEL_H +#define _ASM_ARM_HUGETLB_3LEVEL_H + + +/* + * If our huge pte is non-zero then mark the valid bit. + * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero + * ptes. + * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes). 
+ */ +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + pte_t retval = *ptep; + if (pte_val(retval)) + pte_val(retval) |= L_PTE_VALID; + return retval; +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + ptep_clear_flush(vma, addr, ptep); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */ diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h new file mode 100644 index 000000000000..1f1b1cd112f3 --- /dev/null +++ b/arch/arm/include/asm/hugetlb.h @@ -0,0 +1,84 @@ +/* + * arch/arm/include/asm/hugetlb.h + * + * Copyright (C) 2012 ARM Ltd. + * + * Based on arch/x86/include/asm/hugetlb.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_ARM_HUGETLB_H +#define _ASM_ARM_HUGETLB_H + +#include <asm/page.h> +#include <asm-generic/hugetlb.h> + +#include <asm/hugetlb-3level.h> + +static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, + unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, unsigned long len) +{ + return 0; +} + +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) +{ + struct hstate *h = hstate_file(file); + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} + +#endif /* _ASM_ARM_HUGETLB_H */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 652b56086de7..d070741b2b37 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -130,16 +130,16 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) */ extern 
void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, size_t, unsigned int, void *); -extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, +extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, void *); extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); -extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached); +extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int); +extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); extern void __iounmap(volatile void __iomem *addr); extern void __arm_iounmap(volatile void __iomem *addr); -extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, +extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); extern void (*arch_iounmap)(volatile void __iomem *); diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 124623e5ef14..64e96960de29 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -135,7 +135,6 @@ #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) #define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) -#define S2_PGD_SIZE (1 << S2_PGD_ORDER) /* Virtualization Translation Control Register (VTCR) bits */ #define VTCR_SH0 (3 << 12) diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 18d50322a9e2..a2f43ddcc300 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -37,16 +37,18 @@ #define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ #define c6_DFAR 16 /* Data Fault Address Register */ #define c6_IFAR 17 /* Instruction Fault Address Register */ -#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */ -#define c10_PRRR 19 /* Primary Region Remap Register */ -#define c10_NMRR 20 /* Normal Memory Remap Register */ -#define c12_VBAR 21 /* Vector Base Address Register */ -#define c13_CID 22 /* Context ID Register */ -#define c13_TID_URW 23 /* Thread ID, User R/W */ -#define c13_TID_URO 24 /* Thread ID, User R/O */ -#define c13_TID_PRIV 25 /* Thread ID, Privileged */ -#define c14_CNTKCTL 26 /* Timer Control Register (PL1) */ -#define NR_CP15_REGS 27 /* Number of regs (incl. invalid) */ +#define c7_PAR 18 /* Physical Address Register */ +#define c7_PAR_high 19 /* PAR top 32 bits */ +#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */ +#define c10_PRRR 21 /* Primary Region Remap Register */ +#define c10_NMRR 22 /* Normal Memory Remap Register */ +#define c12_VBAR 23 /* Vector Base Address Register */ +#define c13_CID 24 /* Context ID Register */ +#define c13_TID_URW 25 /* Thread ID, User R/W */ +#define c13_TID_URO 26 /* Thread ID, User R/O */ +#define c13_TID_PRIV 27 /* Thread ID, Privileged */ +#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */ +#define NR_CP15_REGS 29 /* Number of regs (incl. 
invalid) */ #define ARM_EXCEPTION_RESET 0 #define ARM_EXCEPTION_UNDEFINED 1 @@ -72,8 +74,6 @@ extern char __kvm_hyp_vector[]; extern char __kvm_hyp_code_start[]; extern char __kvm_hyp_code_end[]; -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 82b4babead2c..a464e8d7b6c5 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -65,11 +65,6 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) return cpsr_mode > USR_MODE;; } -static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) -{ - return reg == 15; -} - static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu) { return vcpu->arch.fault.hsr; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 57cb786a6203..7d22517d8071 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -23,9 +23,14 @@ #include <asm/kvm_asm.h> #include <asm/kvm_mmio.h> #include <asm/fpstate.h> -#include <asm/kvm_arch_timer.h> +#include <kvm/arm_arch_timer.h> +#if defined(CONFIG_KVM_ARM_MAX_VCPUS) #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS +#else +#define KVM_MAX_VCPUS 0 +#endif + #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -38,7 +43,7 @@ #define KVM_NR_PAGE_SIZES 1 #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) -#include <asm/kvm_vgic.h> +#include <kvm/arm_vgic.h> struct kvm_vcpu; u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); @@ -190,8 +195,8 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, int exception_index); -static inline void __cpu_init_hyp_mode(unsigned long long boot_pgd_ptr, - unsigned long long pgd_ptr, +static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, + phys_addr_t pgd_ptr, unsigned long hyp_stack_ptr, unsigned long vector_ptr) { diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 75bf07910b81..441efc491b50 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -11,6 +11,7 @@ #include <linux/types.h> #ifndef __ASSEMBLY__ +#include <linux/reboot.h> struct tag; struct meminfo; @@ -43,7 +44,7 @@ struct machine_desc { unsigned char reserve_lp0 :1; /* never has lp0 */ unsigned char reserve_lp1 :1; /* never has lp1 */ unsigned char reserve_lp2 :1; /* never has lp2 */ - char restart_mode; /* default restart mode */ + enum reboot_mode reboot_mode; /* default restart mode */ struct smp_operations *smp; /* SMP operations */ bool (*smp_init)(void); void (*fixup)(struct tag *, char **, @@ -58,7 +59,7 @@ struct machine_desc { #ifdef CONFIG_MULTI_IRQ_HANDLER void (*handle_irq)(struct pt_regs *); #endif - void (*restart)(char, const char *); + void (*restart)(enum reboot_mode, const char *); }; /* diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 57870ab313c5..e750a938fd3c 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -18,6 +18,8 @@ #include <linux/types.h> #include <linux/sizes.h> +#include <asm/cache.h> + #ifdef CONFIG_NEED_MACH_MEMORY_H #include <mach/memory.h> #endif @@ -141,6 +143,20 @@ #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) #define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) +/* + * Minimum 
guaranted alignment in pgd_alloc(). The page table pointers passed + * around in head.S and proc-*.S are shifted by this amount, in order to + * leave spare high bits for systems with physical address extension. This + * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but + * gives us about 38-bits or so. + */ +#ifdef CONFIG_ARM_LPAE +#define ARCH_PGD_SHIFT L1_CACHE_SHIFT +#else +#define ARCH_PGD_SHIFT 0 +#endif +#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1) + #ifndef __ASSEMBLY__ /* @@ -207,7 +223,7 @@ static inline unsigned long __phys_to_virt(unsigned long x) * direct-mapped view. We assume this is the first page * of RAM in the mem_map as well. */ -#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) +#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) /* * These are *only* valid on the kernel direct mapped RAM memory. @@ -260,12 +276,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) /* * Conversion between a struct page and a physical address. * - * Note: when converting an unknown physical address to a - * struct page, the resulting pointer must be validated - * using VALID_PAGE(). It must return an invalid struct page - * for any physical address not corresponding to a system - * RAM address. - * * page_to_pfn(page) convert a struct page * to a PFN number * pfn_to_page(pfn) convert a _valid_ PFN number to struct page * * diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index a7b85e0d0cc1..b5792b7fd8d3 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -18,6 +18,7 @@ #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/proc-fns.h> +#include <asm/smp_plat.h> #include <asm-generic/mm_hooks.h> void __check_vmalloc_seq(struct mm_struct *mm); @@ -27,7 +28,15 @@ void __check_vmalloc_seq(struct mm_struct *mm); void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) -DECLARE_PER_CPU(atomic64_t, active_asids); +#ifdef CONFIG_ARM_ERRATA_798181 +void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, + cpumask_t *mask); +#else /* !CONFIG_ARM_ERRATA_798181 */ +static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, + cpumask_t *mask) +{ +} +#endif /* CONFIG_ARM_ERRATA_798181 */ #else /* !CONFIG_CPU_HAS_ASID */ @@ -98,12 +107,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, #ifdef CONFIG_MMU unsigned int cpu = smp_processor_id(); -#ifdef CONFIG_SMP - /* check for possible thread migration */ - if (!cpumask_empty(mm_cpumask(next)) && + /* + * __sync_icache_dcache doesn't broadcast the I-cache invalidation, + * so check for possible thread migration and invalidate the I-cache + * if we're new to this CPU. 
+ */ + if (cache_ops_need_broadcast() && + !cpumask_empty(mm_cpumask(next)) && !cpumask_test_cpu(cpu, mm_cpumask(next))) __flush_icache_all(); -#endif + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { check_and_switch_context(next, tsk); if (cache_is_vivt()) diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h new file mode 100644 index 000000000000..c3247cc2fe08 --- /dev/null +++ b/arch/arm/include/asm/mpu.h @@ -0,0 +1,76 @@ +#ifndef __ARM_MPU_H +#define __ARM_MPU_H + +#ifdef CONFIG_ARM_MPU + +/* MPUIR layout */ +#define MPUIR_nU 1 +#define MPUIR_DREGION 8 +#define MPUIR_IREGION 16 +#define MPUIR_DREGION_SZMASK (0xFF << MPUIR_DREGION) +#define MPUIR_IREGION_SZMASK (0xFF << MPUIR_IREGION) + +/* ID_MMFR0 data relevant to MPU */ +#define MMFR0_PMSA (0xF << 4) +#define MMFR0_PMSAv7 (3 << 4) + +/* MPU D/I Size Register fields */ +#define MPU_RSR_SZ 1 +#define MPU_RSR_EN 0 + +/* The D/I RSR value for an enabled region spanning the whole of memory */ +#define MPU_RSR_ALL_MEM 63 + +/* Individual bits in the DR/IR ACR */ +#define MPU_ACR_XN (1 << 12) +#define MPU_ACR_SHARED (1 << 2) + +/* C, B and TEX[2:0] bits only have semantic meanings when grouped */ +#define MPU_RGN_CACHEABLE 0xB +#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) +#define MPU_RGN_STRONGLY_ORDERED 0 + +/* Main region should only be shared for SMP */ +#ifdef CONFIG_SMP +#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) +#else +#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE +#endif + +/* Access permission bits of ACR (only define those that we use)*/ +#define MPU_AP_PL1RW_PL0RW (0x3 << 8) +#define MPU_AP_PL1RW_PL0R0 (0x2 << 8) +#define MPU_AP_PL1RW_PL0NA (0x1 << 8) + +/* For minimal static MPU region configurations */ +#define MPU_PROBE_REGION 0 +#define MPU_BG_REGION 1 +#define MPU_RAM_REGION 2 +#define MPU_VECTORS_REGION 3 + +/* Maximum number of regions Linux is interested in */ +#define MPU_MAX_REGIONS 16 + +#define MPU_DATA_SIDE 0 +#define MPU_INSTR_SIDE 1 + +#ifndef __ASSEMBLY__ + +struct mpu_rgn { + /* Assume same attributes for d/i-side */ + u32 drbar; + u32 drsr; + u32 dracr; +}; + +struct mpu_rgn_info { + u32 mpuir; + struct mpu_rgn rgns[MPU_MAX_REGIONS]; +}; +extern struct mpu_rgn_info mpu_rgn_info; + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_ARM_MPU */ + +#endif diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 812a4944e783..6363f3d1d505 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -13,7 +13,7 @@ /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) #ifndef __ASSEMBLY__ diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index 18f5cef82ad5..626989fec4d3 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -30,6 +30,7 @@ #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) +#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1) #define PMD_BIT4 (_AT(pmdval_t, 0)) #define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) #define PMD_APTABLE_SHIFT (61) @@ -41,6 +42,8 @@ */ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) +#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ +#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ 
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) @@ -66,6 +69,7 @@ #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) +#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1) #define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */ #define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */ #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ @@ -79,4 +83,24 @@ #define PHYS_MASK_SHIFT (40) #define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1) +/* + * TTBR0/TTBR1 split (PAGE_OFFSET): + * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) + * 0x80000000: T0SZ = 0, T1SZ = 1 + * 0xc0000000: T0SZ = 0, T1SZ = 2 + * + * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise + * booting secondary CPUs would end up using TTBR1 for the identity + * mapping set up in TTBR0. + */ +#if defined CONFIG_VMSPLIT_2G +#define TTBR1_OFFSET 16 /* skip two L1 entries */ +#elif defined CONFIG_VMSPLIT_3G +#define TTBR1_OFFSET (4096 * (1 + 3)) /* only L2, skip pgd + 3*pmd */ +#else +#define TTBR1_OFFSET 0 +#endif + +#define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16) + #endif diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 86b8fe398b95..5689c18c85f5 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -33,7 +33,7 @@ #define PTRS_PER_PMD 512 #define PTRS_PER_PGD 4 -#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) +#define PTE_HWTABLE_PTRS (0) #define PTE_HWTABLE_OFF (0) #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64)) @@ -48,20 +48,28 @@ #define PMD_SHIFT 21 #define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) +#define PMD_MASK (~((1 << PMD_SHIFT) - 1)) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) +#define PGDIR_MASK (~((1 << PGDIR_SHIFT) - 1)) /* * section address mask and size definitions. */ #define SECTION_SHIFT 21 #define SECTION_SIZE (1UL << SECTION_SHIFT) -#define SECTION_MASK (~(SECTION_SIZE-1)) +#define SECTION_MASK (~((1 << SECTION_SHIFT) - 1)) #define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE) /* + * Hugetlb definitions. + */ +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +/* * "Linux" PTE definitions for LPAE. * * These bits overlap with the hardware bits but the naming is preserved for @@ -79,6 +87,11 @@ #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ #define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ +#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) +#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) +#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56) +#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57) + /* * To be used in assembly code with the upper page attributes. */ @@ -166,8 +179,83 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) clean_pmd_entry(pmdp); \ } while (0) +/* + * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes + * that are written to a page table but not for ptes created with mk_pte. + * + * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to + * hugetlb_cow, where it is compared with an entry in a page table. + * This comparison test fails erroneously leading ultimately to a memory leak. 
+ * + * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is + * present before running the comparison. + */ +#define __HAVE_ARCH_PTE_SAME +#define pte_same(pte_a,pte_b) ((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG \ + : pte_val(pte_a)) \ + == (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG \ + : pte_val(pte_b))) + #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext))) +#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT)) +#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) + +#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) +#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) +#endif + +#define PMD_BIT_FUNC(fn,op) \ +static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } + +PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); +PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); +PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); +PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); +PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); +PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); + +#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) + +#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) +#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) +#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) + +/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ +#define pmd_mknotpresent(pmd) (__pmd(0)) + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY | + PMD_SECT_VALID | PMD_SECT_NONE; + pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); + return pmd; +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + BUG_ON(addr >= TASK_SIZE); + + /* create a faulting entry if PROT_NONE protected */ + if (pmd_val(pmd) & PMD_SECT_NONE) + pmd_val(pmd) &= ~PMD_SECT_VALID; + + *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); + flush_pmd_entry(pmdp); +} + +static inline int has_transparent_hugepage(void) +{ + return 1; +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_PGTABLE_3LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 229e0dde9c71..04aeb02d2e11 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -24,6 +24,9 @@ #include <asm/memory.h> #include <asm/pgtable-hwdef.h> + +#include <asm/tlbflush.h> + #ifdef CONFIG_ARM_LPAE #include <asm/pgtable-3level.h> #else diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index f3628fb3d2b3..5324c1112f3a 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -60,7 +60,7 @@ extern struct processor { /* * Set the page table */ - void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); + void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm); /* * Set a possibly extended PTE. Non-extended PTEs should * ignore 'ext'. 
@@ -82,7 +82,7 @@ extern void cpu_proc_init(void); extern void cpu_proc_fin(void); extern int cpu_do_idle(void); extern void cpu_dcache_clean_area(void *, int); -extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); +extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); #ifdef CONFIG_ARM_LPAE extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); #else @@ -116,13 +116,25 @@ extern void cpu_resume(void); #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) #ifdef CONFIG_ARM_LPAE + +#define cpu_get_ttbr(nr) \ + ({ \ + u64 ttbr; \ + __asm__("mrrc p15, " #nr ", %Q0, %R0, c2" \ + : "=r" (ttbr)); \ + ttbr; \ + }) + +#define cpu_set_ttbr(nr, val) \ + do { \ + u64 ttbr = val; \ + __asm__("mcrr p15, " #nr ", %Q0, %R0, c2" \ + : : "r" (ttbr)); \ + } while (0) + #define cpu_get_pgd() \ ({ \ - unsigned long pg, pg2; \ - __asm__("mrrc p15, 0, %0, %1, c2" \ - : "=r" (pg), "=r" (pg2) \ - : \ - : "cc"); \ + u64 pg = cpu_get_ttbr(0); \ pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ (pgd_t *)phys_to_virt(pg); \ }) @@ -137,6 +149,10 @@ extern void cpu_resume(void); }) #endif +#else /*!CONFIG_MMU */ + +#define cpu_switch_mm(pgd,mm) { } + #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index 3d520ddca61b..2389b71a8e7c 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -1,16 +1,4 @@ -/* - * sched_clock.h: support for extending counters to full 64-bit ns counter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. +/* You shouldn't include this file. Use linux/sched_clock.h instead. + * Temporary file until all asm/sched_clock.h users are gone */ -#ifndef ASM_SCHED_CLOCK -#define ASM_SCHED_CLOCK - -extern void sched_clock_postinit(void); -extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); - -extern unsigned long long (*sched_clock_func)(void); - -#endif +#include <linux/sched_clock.h> diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index d3a22bebe6ce..a8cae71caceb 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -65,7 +65,10 @@ asmlinkage void secondary_start_kernel(void); * Initial data for bringing up a secondary CPU. */ struct secondary_data { - unsigned long pgdir; + union { + unsigned long mpu_rgn_szr; + unsigned long pgdir; + }; unsigned long swapper_pg_dir; void *stack; }; diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index e78983202737..6462a721ebd4 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -26,6 +26,9 @@ static inline bool is_smp(void) } /* all SMP configurations have the extended CPUID registers */ +#ifndef CONFIG_MMU +#define tlb_ops_need_broadcast() 0 +#else static inline int tlb_ops_need_broadcast(void) { if (!is_smp()) @@ -33,6 +36,7 @@ static inline int tlb_ops_need_broadcast(void) return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; } +#endif #if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7 #define cache_ops_need_broadcast() 0 @@ -66,4 +70,22 @@ static inline int get_logical_index(u32 mpidr) return -EINVAL; } +/* + * NOTE ! Assembly code relies on the following + * structure memory layout in order to carry out load + * multiple from its base address. 
For more + * information check arch/arm/kernel/sleep.S + */ +struct mpidr_hash { + u32 mask; /* used by sleep.S */ + u32 shift_aff[3]; /* used by sleep.S */ + u32 bits; +}; + +extern struct mpidr_hash mpidr_hash; + +static inline u32 mpidr_hash_size(void) +{ + return 1 << mpidr_hash.bits; +} #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 6220e9fdf4c7..f8b8965666e9 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -97,19 +97,22 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) static inline int arch_spin_trylock(arch_spinlock_t *lock) { - unsigned long tmp; + unsigned long contended, res; u32 slock; - __asm__ __volatile__( -" ldrex %0, [%2]\n" -" subs %1, %0, %0, ror #16\n" -" addeq %0, %0, %3\n" -" strexeq %1, %0, [%2]" - : "=&r" (slock), "=&r" (tmp) - : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) - : "cc"); - - if (tmp == 0) { + do { + __asm__ __volatile__( + " ldrex %0, [%3]\n" + " mov %2, #0\n" + " subs %1, %0, %0, ror #16\n" + " addeq %0, %0, %4\n" + " strexeq %2, %0, [%3]" + : "=&r" (slock), "=&r" (contended), "=r" (res) + : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) + : "cc"); + } while (res); + + if (!contended) { smp_mb(); return 1; } else { diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h index 1c0a551ae375..cd20029bcd94 100644 --- a/arch/arm/include/asm/suspend.h +++ b/arch/arm/include/asm/suspend.h @@ -1,6 +1,11 @@ #ifndef __ASM_ARM_SUSPEND_H #define __ASM_ARM_SUSPEND_H +struct sleep_save_sp { + u32 *save_ptr_stash; + u32 save_ptr_stash_phys; +}; + extern void cpu_resume(void); extern int cpu_suspend(unsigned long, int (*)(unsigned long)); diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 21a23e378bbe..a3d61ad984af 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -6,11 +6,12 @@ #include <linux/compiler.h> #include <linux/linkage.h> #include <linux/irqflags.h> +#include <linux/reboot.h> extern void cpu_init(void); void soft_restart(unsigned long); -extern void (*arm_pm_restart)(char str, const char *cmd); +extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_idle)(void); #define UDBG_UNDEFINED (1 << 0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 1995d1a84060..214d4158089a 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -58,7 +58,7 @@ struct thread_info { struct cpu_context_save cpu_context; /* cpu context */ __u32 syscall; /* syscall number */ __u8 used_cp[16]; /* thread used copro */ - unsigned long tp_value; + unsigned long tp_value[2]; /* TLS registers */ #ifdef CONFIG_CRUNCH struct crunch_state crunchstate; #endif diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index bdf2b8458ec1..46e7cfb3e721 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -204,6 +204,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, #endif } +static inline void +tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) +{ + tlb_add_flush(tlb, addr); +} + #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index a3625d141c1d..fdbb9e369745 100644 --- 
a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -535,8 +535,33 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, } #endif +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + #endif -#endif /* CONFIG_MMU */ +#elif defined(CONFIG_SMP) /* !CONFIG_MMU */ + +#ifndef __ASSEMBLY__ + +#include <linux/mm_types.h> + +static inline void local_flush_tlb_all(void) { } +static inline void local_flush_tlb_mm(struct mm_struct *mm) { } +static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { } +static inline void local_flush_tlb_kernel_page(unsigned long kaddr) { } +static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } +static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { } +static inline void local_flush_bp_all(void) { } + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); +extern void flush_tlb_kernel_page(unsigned long kaddr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_bp_all(void); +#endif /* __ASSEMBLY__ */ + +#endif #endif diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 73409e6c0251..83259b873333 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -2,27 +2,30 @@ #define __ASMARM_TLS_H #ifdef __ASSEMBLY__ - .macro set_tls_none, tp, tmp1, tmp2 +#include <asm/asm-offsets.h> + .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 .endm - .macro set_tls_v6k, tp, tmp1, tmp2 + .macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2 + mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register mcr p15, 0, \tp, c13, c0, 3 @ set TLS register - mov \tmp1, #0 - mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register + mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register + str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it .endm - .macro set_tls_v6, tp, tmp1, tmp2 + .macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2 ldr \tmp1, =elf_hwcap ldr \tmp1, [\tmp1, #0] mov \tmp2, #0xffff0fff tst \tmp1, #HWCAP_TLS @ hardware TLS available? 
- mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register - movne \tmp1, #0 - mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 + mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register + mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register + mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register + strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it .endm - .macro set_tls_software, tp, tmp1, tmp2 + .macro switch_tls_software, base, tp, tpuser, tmp1, tmp2 mov \tmp1, #0xffff0fff str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0 .endm @@ -31,19 +34,30 @@ #ifdef CONFIG_TLS_REG_EMUL #define tls_emu 1 #define has_tls_reg 1 -#define set_tls set_tls_none +#define switch_tls switch_tls_none #elif defined(CONFIG_CPU_V6) #define tls_emu 0 #define has_tls_reg (elf_hwcap & HWCAP_TLS) -#define set_tls set_tls_v6 +#define switch_tls switch_tls_v6 #elif defined(CONFIG_CPU_32v6K) #define tls_emu 0 #define has_tls_reg 1 -#define set_tls set_tls_v6k +#define switch_tls switch_tls_v6k #else #define tls_emu 0 #define has_tls_reg 0 -#define set_tls set_tls_software +#define switch_tls switch_tls_software #endif +#ifndef __ASSEMBLY__ +static inline unsigned long get_tpuser(void) +{ + unsigned long reg = 0; + + if (has_tls_reg && !tls_emu) + __asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg)); + + return reg; +} +#endif #endif /* __ASMARM_TLS_H */ diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h index 799f42ecca63..7704e28c3483 100644 --- a/arch/arm/include/asm/xen/hypercall.h +++ b/arch/arm/include/asm/xen/hypercall.h @@ -47,6 +47,7 @@ unsigned long HYPERVISOR_hvm_op(int op, void *arg); int HYPERVISOR_memory_op(unsigned int cmd, void *arg); int HYPERVISOR_physdev_op(int cmd, void *arg); int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); +int HYPERVISOR_tmem_op(void *arg); static inline void MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 30cdacb675af..359a7b50b158 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -1,7 +1,6 @@ #ifndef _ASM_ARM_XEN_PAGE_H #define _ASM_ARM_XEN_PAGE_H -#include <asm/mach/map.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -88,6 +87,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) return __set_phys_to_machine(pfn, mfn); } -#define xen_remap(cookie, size) __arm_ioremap((cookie), (size), MT_MEMORY); +#define xen_remap(cookie, size) ioremap_cached((cookie), (size)); #endif /* _ASM_ARM_XEN_PAGE_H */ diff --git a/arch/arm/include/debug/vexpress.S b/arch/arm/include/debug/vexpress.S index dc8e882a6257..acafb229e2b6 100644 --- a/arch/arm/include/debug/vexpress.S +++ b/arch/arm/include/debug/vexpress.S @@ -16,6 +16,8 @@ #define DEBUG_LL_PHYS_BASE_RS1 0x1c000000 #define DEBUG_LL_UART_OFFSET_RS1 0x00090000 +#define DEBUG_LL_UART_PHYS_CRX 0xb0090000 + #define DEBUG_LL_VIRT_BASE 0xf8000000 #if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT) @@ -67,6 +69,14 @@ #include <asm/hardware/debug-pl01x.S> +#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CRX) + + .macro addruart,rp,tmp,tmp2 + ldr \rp, =DEBUG_LL_UART_PHYS_CRX + .endm + +#include <asm/hardware/debug-pl01x.S> + #else /* CONFIG_DEBUG_LL_UART_NONE */ .macro addruart, rp, rv, tmp diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h index 3688fd15a32d..6d34d080372a 100644 --- a/arch/arm/include/uapi/asm/hwcap.h +++ 
b/arch/arm/include/uapi/asm/hwcap.h @@ -25,6 +25,6 @@ #define HWCAP_IDIVT (1 << 18) #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) - +#define HWCAP_LPAE (1 << 20) #endif /* _UAPI__ASMARM_HWCAP_H */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index f4285b5ffb05..86d10dd47dc4 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -16,7 +16,7 @@ CFLAGS_REMOVE_return_address.o = -pg # Object file lists. obj-y := elf.o entry-common.o irq.o opcodes.o \ - process.o ptrace.o return_address.o sched_clock.o \ + process.o ptrace.o return_address.o \ setup.o signal.o stacktrace.o sys_arm.o time.o traps.o obj-$(CONFIG_ATAGS) += atags_parse.o @@ -38,7 +38,10 @@ obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o -obj-$(CONFIG_SMP) += smp.o smp_tlb.o +obj-$(CONFIG_SMP) += smp.o +ifdef CONFIG_MMU +obj-$(CONFIG_SMP) += smp_tlb.o +endif obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c index 59dcdced6e30..221f07b11ccb 100644 --- a/arch/arm/kernel/arch_timer.c +++ b/arch/arm/kernel/arch_timer.c @@ -11,9 +11,9 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> +#include <linux/sched_clock.h> #include <asm/delay.h> -#include <asm/sched_clock.h> #include <clocksource/arm_arch_timer.h> diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index ee68cce6b48e..ded041711beb 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -23,6 +23,7 @@ #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/procinfo.h> +#include <asm/suspend.h> #include <asm/hardware/cache-l2x0.h> #include <linux/kbuild.h> @@ -145,6 +146,11 @@ int main(void) #ifdef MULTI_CACHE DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); #endif +#ifdef CONFIG_ARM_CPU_SUSPEND + DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); + DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); + DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); +#endif BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 582b405befc5..a39cfc2a1f90 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -685,15 +685,16 @@ ENTRY(__switch_to) UNWIND(.fnstart ) UNWIND(.cantunwind ) add ip, r1, #TI_CPU_SAVE - ldr r3, [r2, #TI_TP_VALUE] ARM( stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack THUMB( str sp, [ip], #4 ) THUMB( str lr, [ip], #4 ) + ldr r4, [r2, #TI_TP_VALUE] + ldr r5, [r2, #TI_TP_VALUE + 4] #ifdef CONFIG_CPU_USE_DOMAINS ldr r6, [r2, #TI_CPU_DOMAIN] #endif - set_tls r3, r4, r5 + switch_tls r1, r4, r5, r3, r7 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) ldr r7, [r2, #TI_TASK] ldr r8, =__stack_chk_guard diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 85a72b0809ca..94104bf69719 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -366,6 +366,16 @@ ENTRY(vector_swi) #endif zero_fp +#ifdef CONFIG_ALIGNMENT_TRAP + ldr ip, __cr_alignment + ldr ip, [ip] + mcr p15, 0, ip, 
c1, c0 @ update control register +#endif + + enable_irq + ct_user_exit + get_thread_info tsk + /* * Get the system call number. */ @@ -379,9 +389,9 @@ ENTRY(vector_swi) #ifdef CONFIG_ARM_THUMB tst r8, #PSR_T_BIT movne r10, #0 @ no thumb OABI emulation - ldreq r10, [lr, #-4] @ get SWI instruction + USER( ldreq r10, [lr, #-4] ) @ get SWI instruction #else - ldr r10, [lr, #-4] @ get SWI instruction + USER( ldr r10, [lr, #-4] ) @ get SWI instruction #endif #ifdef CONFIG_CPU_ENDIAN_BE8 rev r10, r10 @ little endian instruction @@ -396,22 +406,13 @@ ENTRY(vector_swi) /* Legacy ABI only, possibly thumb mode. */ tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in - ldreq scno, [lr, #-4] + USER( ldreq scno, [lr, #-4] ) #else /* Legacy ABI only. */ - ldr scno, [lr, #-4] @ get SWI instruction + USER( ldr scno, [lr, #-4] ) @ get SWI instruction #endif -#ifdef CONFIG_ALIGNMENT_TRAP - ldr ip, __cr_alignment - ldr ip, [ip] - mcr p15, 0, ip, c1, c0 @ update control register -#endif - enable_irq - ct_user_exit - - get_thread_info tsk adr tbl, sys_call_table @ load syscall table pointer #if defined(CONFIG_OABI_COMPAT) @@ -446,6 +447,21 @@ local_restart: eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back bcs arm_syscall b sys_ni_syscall @ not private func + +#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) + /* + * We failed to handle a fault trying to access the page + * containing the swi instruction, but we're not really in a + * position to return -EFAULT. Instead, return back to the + * instruction and re-enter the user fault handling path trying + * to page it in. This will likely result in sending SEGV to the + * current task. + */ +9001: + sub lr, lr, #4 + str lr, [sp, #S_PC] + b ret_fast_syscall +#endif ENDPROC(vector_swi) /* diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 8812ce88f7a1..75f14cc3e073 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -17,9 +17,12 @@ #include <asm/assembler.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> +#include <asm/memory.h> #include <asm/cp15.h> #include <asm/thread_info.h> #include <asm/v7m.h> +#include <asm/mpu.h> +#include <asm/page.h> /* * Kernel startup entry point. @@ -63,12 +66,74 @@ ENTRY(stext) movs r10, r5 @ invalid processor (r5=0)? beq __error_p @ yes, error 'p' - adr lr, BSYM(__after_proc_init) @ return (PIC) address +#ifdef CONFIG_ARM_MPU + /* Calculate the size of a region covering just the kernel */ + ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET + ldr r6, =(_end) @ Cover whole kernel + sub r6, r6, r5 @ Minimum size of region to map + clz r6, r6 @ Region size must be 2^N... + rsb r6, r6, #31 @ ...so round up region size + lsl r6, r6, #MPU_RSR_SZ @ Put size in right field + orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit + bl __setup_mpu +#endif + ldr r13, =__mmap_switched @ address to jump to after + @ initialising sctlr + adr lr, BSYM(1f) @ return (PIC) address ARM( add pc, r10, #PROCINFO_INITFUNC ) THUMB( add r12, r10, #PROCINFO_INITFUNC ) THUMB( mov pc, r12 ) + 1: b __after_proc_init ENDPROC(stext) +#ifdef CONFIG_SMP + __CPUINIT +ENTRY(secondary_startup) + /* + * Common entry point for secondary CPUs. + * + * Ensure that we're in SVC mode, and IRQs are disabled. Lookup + * the processor type - there is no need to check the machine type + * as it has already been validated by the primary processor. 
+ */ + setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 +#ifndef CONFIG_CPU_CP15 + ldr r9, =CONFIG_PROCESSOR_ID +#else + mrc p15, 0, r9, c0, c0 @ get processor id +#endif + bl __lookup_processor_type @ r5=procinfo r9=cpuid + movs r10, r5 @ invalid processor? + beq __error_p @ yes, error 'p' + + adr r4, __secondary_data + ldmia r4, {r7, r12} + +#ifdef CONFIG_ARM_MPU + /* Use MPU region info supplied by __cpu_up */ + ldr r6, [r7] @ get secondary_data.mpu_szr + bl __setup_mpu @ Initialize the MPU +#endif + + adr lr, BSYM(__after_proc_init) @ return address + mov r13, r12 @ __secondary_switched address + ARM( add pc, r10, #PROCINFO_INITFUNC ) + THUMB( add r12, r10, #PROCINFO_INITFUNC ) + THUMB( mov pc, r12 ) +ENDPROC(secondary_startup) + +ENTRY(__secondary_switched) + ldr sp, [r7, #8] @ set up the stack pointer + mov fp, #0 + b secondary_start_kernel +ENDPROC(__secondary_switched) + + .type __secondary_data, %object +__secondary_data: + .long secondary_data + .long __secondary_switched +#endif /* CONFIG_SMP */ + /* * Set the Control Register and Read the process ID. */ @@ -99,10 +164,97 @@ __after_proc_init: #endif mcr p15, 0, r0, c1, c0, 0 @ write control reg #endif /* CONFIG_CPU_CP15 */ - - b __mmap_switched @ clear the BSS and jump - @ to start_kernel + mov pc, r13 ENDPROC(__after_proc_init) .ltorg +#ifdef CONFIG_ARM_MPU + + +/* Set which MPU region should be programmed */ +.macro set_region_nr tmp, rgnr + mov \tmp, \rgnr @ Use static region numbers + mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR +.endm + +/* Setup a single MPU region, either D or I side (D-side for unified) */ +.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE + mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR + mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR + mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR +.endm + +/* + * Setup the MPU and initial MPU Regions. We create the following regions: + * Region 0: Use this for probing the MPU details, so leave disabled. + * Region 1: Background region - covers the whole of RAM as strongly ordered + * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6 + * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page + * + * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION +*/ + +ENTRY(__setup_mpu) + + /* Probe for v7 PMSA compliance */ + mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 + and r0, r0, #(MMFR0_PMSA) @ PMSA field + teq r0, #(MMFR0_PMSAv7) @ PMSA v7 + bne __error_p @ Fail: ARM_MPU on NOT v7 PMSA + + /* Determine whether the D/I-side memory map is unified. 
We set the + * flags here and continue to use them for the rest of this function */ + mrc p15, 0, r0, c0, c0, 4 @ MPUIR + ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU + beq __error_p @ Fail: ARM_MPU and no MPU + tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified + + /* Setup second region first to free up r6 */ + set_region_nr r0, #MPU_RAM_REGION + isb + /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ + ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET + ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) + + setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled + beq 1f @ Memory-map not unified + setup_region r0, r5, r6, MPU_INSTR_SIDE @ PHYS_OFFSET, shared, enabled +1: isb + + /* First/background region */ + set_region_nr r0, #MPU_BG_REGION + isb + /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */ + mov r0, #0 @ BG region starts at 0x0 + ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA) + mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled + + setup_region r0, r5, r6, MPU_DATA_SIDE @ 0x0, BG region, enabled + beq 2f @ Memory-map not unified + setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled +2: isb + + /* Vectors region */ + set_region_nr r0, #MPU_VECTORS_REGION + isb + /* Shared, inaccessible to PL0, rw PL1 */ + mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE + ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL) + /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */ + mov r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) + + setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled + beq 3f @ Memory-map not unified + setup_region r0, r5, r6, MPU_INSTR_SIDE @ VECTORS_BASE, PL0 NA, enabled +3: isb + + /* Enable the MPU */ + mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR + bic r0, r0, #CR_BR @ Disable the 'default mem-map' + orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) + mcr p15, 0, r0, c1, c0, 0 @ Enable MPU + isb + mov pc,lr +ENDPROC(__setup_mpu) +#endif #include "head-common.S" diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 8bac553fe213..45e8935cae4e 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -156,7 +156,7 @@ ENDPROC(stext) * * Returns: * r0, r3, r5-r7 corrupted - * r4 = physical page table address + * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) */ __create_page_tables: pgtbl r4, r8 @ page table address @@ -331,6 +331,7 @@ __create_page_tables: #endif #ifdef CONFIG_ARM_LPAE sub r4, r4, #0x1000 @ point to the PGD table + mov r4, r4, lsr #ARCH_PGD_SHIFT #endif mov pc, lr ENDPROC(__create_page_tables) @@ -408,7 +409,7 @@ __secondary_data: * r0 = cp#15 control register * r1 = machine ID * r2 = atags or dtb pointer - * r4 = page table pointer + * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h) * r9 = processor ID * r13 = *virtual* address to jump to upon completion */ @@ -427,10 +428,7 @@ __enable_mmu: #ifdef CONFIG_CPU_ICACHE_DISABLE bic r0, r0, #CR_I #endif -#ifdef CONFIG_ARM_LPAE - mov r5, #0 - mcrr p15, 0, r4, r5, c2 @ load TTBR0 -#else +#ifndef CONFIG_ARM_LPAE mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 1315c4ccfa56..4910232c4833 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -153,6 +153,13 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL orr r7, r7, #3 @ PL1PCEN | PL1PCTEN mcr p15, 4, r7, c14, 
c1, 0 @ CNTHCTL + mov r7, #0 + mcrr p15, 4, r7, r7, c14 @ CNTVOFF + + @ Disable virtual timer in case it was counting + mrc p15, 0, r7, c14, c3, 1 @ CNTV_CTL + bic r7, #1 @ Clear ENABLE + mcr p15, 0, r7, c14, c3, 1 @ CNTV_CTL 1: #endif diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 8c3094d0f7b7..d9f5cd4e533f 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -569,6 +569,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) return; } + perf_callchain_store(entry, regs->ARM_pc); tail = (struct frame_tail __user *)regs->ARM_fp - 1; while ((entry->nr < PERF_MAX_STACK_DEPTH) && diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 6e8931ccf13e..d3ca4f6915af 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -32,6 +32,7 @@ #include <linux/hw_breakpoint.h> #include <linux/cpuidle.h> #include <linux/leds.h> +#include <linux/reboot.h> #include <asm/cacheflush.h> #include <asm/idmap.h> @@ -39,6 +40,7 @@ #include <asm/thread_notify.h> #include <asm/stacktrace.h> #include <asm/mach/time.h> +#include <asm/tls.h> #ifdef CONFIG_CC_STACKPROTECTOR #include <linux/stackprotector.h> @@ -112,7 +114,7 @@ void soft_restart(unsigned long addr) BUG(); } -static void null_restart(char mode, const char *cmd) +static void null_restart(enum reboot_mode reboot_mode, const char *cmd) { } @@ -122,7 +124,7 @@ static void null_restart(char mode, const char *cmd) void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); -void (*arm_pm_restart)(char str, const char *cmd) = null_restart; +void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd) = null_restart; EXPORT_SYMBOL_GPL(arm_pm_restart); /* @@ -174,16 +176,6 @@ void arch_cpu_idle(void) default_idle(); } -static char reboot_mode = 'h'; - -int __init reboot_setup(char *str) -{ - reboot_mode = str[0]; - return 1; -} - -__setup("reboot=", reboot_setup); - /* * Called by kexec, immediately prior to machine_kexec(). 
* @@ -374,7 +366,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) - thread->tp_value = childregs->ARM_r3; + thread->tp_value[0] = childregs->ARM_r3; + thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c index 23a11424c568..219f1d73572a 100644 --- a/arch/arm/kernel/psci_smp.c +++ b/arch/arm/kernel/psci_smp.c @@ -68,8 +68,6 @@ void __ref psci_cpu_die(unsigned int cpu) /* We should never return */ panic("psci: cpu %d failed to shutdown\n", cpu); } -#else -#define psci_cpu_die NULL #endif bool __init psci_smp_available(void) @@ -80,5 +78,7 @@ bool __init psci_smp_available(void) struct smp_operations __initdata psci_smp_ops = { .smp_boot_secondary = psci_boot_secondary, +#ifdef CONFIG_HOTPLUG_CPU .cpu_die = psci_cpu_die, +#endif }; diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 03deeffd9f6d..0dd3b79b15c3 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -849,7 +849,7 @@ long arch_ptrace(struct task_struct *child, long request, #endif case PTRACE_GET_THREAD_AREA: - ret = put_user(task_thread_info(child)->tp_value, + ret = put_user(task_thread_info(child)->tp_value[0], datap); break; @@ -886,20 +886,12 @@ long arch_ptrace(struct task_struct *child, long request, #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: - if (ptrace_get_breakpoints(child) < 0) - return -ESRCH; - ret = ptrace_gethbpregs(child, addr, (unsigned long __user *)data); - ptrace_put_breakpoints(child); break; case PTRACE_SETHBPREGS: - if (ptrace_get_breakpoints(child) < 0) - return -ESRCH; - ret = ptrace_sethbpregs(child, addr, (unsigned long __user *)data); - ptrace_put_breakpoints(child); break; #endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1c8278de6c46..63af9a7ae512 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -74,7 +74,7 @@ __setup("fpe=", fpe_setup); extern void paging_init(struct machine_desc *desc); extern void sanity_check_meminfo(void); -extern void reboot_setup(char *str); +extern enum reboot_mode reboot_mode; extern void setup_dma_zone(struct machine_desc *desc); unsigned int processor_id; @@ -367,7 +367,7 @@ void __init early_print(const char *str, ...) static void __init cpuid_init_hwcaps(void) { - unsigned int divide_instrs; + unsigned int divide_instrs, vmsa; if (cpu_architecture() < CPU_ARCH_ARMv7) return; @@ -380,6 +380,11 @@ static void __init cpuid_init_hwcaps(void) case 1: elf_hwcap |= HWCAP_IDIVT; } + + /* LPAE implies atomic ldrd/strd instructions */ + vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0; + if (vmsa >= 5) + elf_hwcap |= HWCAP_LPAE; } static void __init feat_v6_fixup(void) @@ -470,9 +475,82 @@ void __init smp_setup_processor_id(void) for (i = 1; i < nr_cpu_ids; ++i) cpu_logical_map(i) = i == cpu ? 0 : i; + /* + * clear __my_cpu_offset on boot CPU to avoid hang caused by + * using percpu variable early, for example, lockdep will + * access percpu variable inside lock_release + */ + set_my_cpu_offset(0); + printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); } +struct mpidr_hash mpidr_hash; +#ifdef CONFIG_SMP +/** + * smp_build_mpidr_hash - Pre-compute shifts required at each affinity + * level in order to build a linear index from an + * MPIDR value. 
Resulting algorithm is a collision + * free hash carried out through shifting and ORing + */ +static void __init smp_build_mpidr_hash(void) +{ + u32 i, affinity; + u32 fs[3], bits[3], ls, mask = 0; + /* + * Pre-scan the list of MPIDRS and filter out bits that do + * not contribute to affinity levels, ie they never toggle. + */ + for_each_possible_cpu(i) + mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); + pr_debug("mask of set bits 0x%x\n", mask); + /* + * Find and stash the last and first bit set at all affinity levels to + * check how many bits are required to represent them. + */ + for (i = 0; i < 3; i++) { + affinity = MPIDR_AFFINITY_LEVEL(mask, i); + /* + * Find the MSB bit and LSB bits position + * to determine how many bits are required + * to express the affinity level. + */ + ls = fls(affinity); + fs[i] = affinity ? ffs(affinity) - 1 : 0; + bits[i] = ls - fs[i]; + } + /* + * An index can be created from the MPIDR by isolating the + * significant bits at each affinity level and by shifting + * them in order to compress the 24 bits values space to a + * compressed set of values. This is equivalent to hashing + * the MPIDR through shifting and ORing. It is a collision free + * hash though not minimal since some levels might contain a number + * of CPUs that is not an exact power of 2 and their bit + * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}. + */ + mpidr_hash.shift_aff[0] = fs[0]; + mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0]; + mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] - + (bits[1] + bits[0]); + mpidr_hash.mask = mask; + mpidr_hash.bits = bits[2] + bits[1] + bits[0]; + pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n", + mpidr_hash.shift_aff[0], + mpidr_hash.shift_aff[1], + mpidr_hash.shift_aff[2], + mpidr_hash.mask, + mpidr_hash.bits); + /* + * 4x is an arbitrary value used to warn on a hash table much bigger + * than expected on most systems. + */ + if (mpidr_hash_size() > 4 * num_possible_cpus()) + pr_warn("Large number of MPIDR hash buckets detected\n"); + sync_cache_w(&mpidr_hash); +} +#endif + static void __init setup_processor(void) { struct proc_info_list *list; @@ -783,8 +861,8 @@ void __init setup_arch(char **cmdline_p) setup_dma_zone(mdesc); - if (mdesc->restart_mode) - reboot_setup(&mdesc->restart_mode); + if (mdesc->reboot_mode != REBOOT_HARD) + reboot_mode = mdesc->reboot_mode; init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; @@ -820,6 +898,7 @@ void __init setup_arch(char **cmdline_p) smp_set_ops(mdesc->smp); } smp_init_cpus(); + smp_build_mpidr_hash(); } #endif @@ -892,6 +971,7 @@ static const char *hwcap_str[] = { "vfpv4", "idiva", "idivt", + "lpae", NULL }; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 296786bdbb73..1c16c35c271a 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -392,14 +392,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, if (ksig->ka.sa.sa_flags & SA_SIGINFO) idx += 3; + /* + * Put the sigreturn code on the stack no matter which return + * mechanism we use in order to remain ABI compliant + */ if (__put_user(sigreturn_codes[idx], rc) || __put_user(sigreturn_codes[idx+1], rc+1)) return 1; - if (cpsr & MODE32_BIT) { + if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { /* * 32-bit code can use the new high-page - * signal return code support. 
+ * signal return code support except when the MPU has + * protected the vectors page from PL0 */ retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; } else { diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 987dcf33415c..db1536b8b30b 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -7,6 +7,49 @@ .text /* + * Implementation of MPIDR hash algorithm through shifting + * and OR'ing. + * + * @dst: register containing hash result + * @rs0: register containing affinity level 0 bit shift + * @rs1: register containing affinity level 1 bit shift + * @rs2: register containing affinity level 2 bit shift + * @mpidr: register containing MPIDR value + * @mask: register containing MPIDR mask + * + * Pseudo C-code: + * + *u32 dst; + * + *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) { + * u32 aff0, aff1, aff2; + * u32 mpidr_masked = mpidr & mask; + * aff0 = mpidr_masked & 0xff; + * aff1 = mpidr_masked & 0xff00; + * aff2 = mpidr_masked & 0xff0000; + * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2); + *} + * Input registers: rs0, rs1, rs2, mpidr, mask + * Output register: dst + * Note: input and output registers must be disjoint register sets + (eg: a macro instance with mpidr = r1 and dst = r1 is invalid) + */ + .macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask + and \mpidr, \mpidr, \mask @ mask out MPIDR bits + and \dst, \mpidr, #0xff @ mask=aff0 + ARM( mov \dst, \dst, lsr \rs0 ) @ dst=aff0>>rs0 + THUMB( lsr \dst, \dst, \rs0 ) + and \mask, \mpidr, #0xff00 @ mask = aff1 + ARM( orr \dst, \dst, \mask, lsr \rs1 ) @ dst|=(aff1>>rs1) + THUMB( lsr \mask, \mask, \rs1 ) + THUMB( orr \dst, \dst, \mask ) + and \mask, \mpidr, #0xff0000 @ mask = aff2 + ARM( orr \dst, \dst, \mask, lsr \rs2 ) @ dst|=(aff2>>rs2) + THUMB( lsr \mask, \mask, \rs2 ) + THUMB( orr \dst, \dst, \mask ) + .endm + +/* * Save CPU state for a suspend. This saves the CPU general purpose * registers, and allocates space on the kernel stack to save the CPU * specific registers and some other data for resume. @@ -29,12 +72,18 @@ ENTRY(__cpu_suspend) mov r1, r4 @ size of save block mov r2, r5 @ virtual SP ldr r3, =sleep_save_sp -#ifdef CONFIG_SMP - ALT_SMP(mrc p15, 0, lr, c0, c0, 5) - ALT_UP(mov lr, #0) - and lr, lr, #15 + ldr r3, [r3, #SLEEP_SAVE_SP_VIRT] + ALT_SMP(mrc p15, 0, r9, c0, c0, 5) + ALT_UP_B(1f) + ldr r8, =mpidr_hash + /* + * This ldmia relies on the memory layout of the mpidr_hash + * struct mpidr_hash. + */ + ldmia r8, {r4-r7} @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts + compute_mpidr_hash lr, r5, r6, r7, r9, r4 add r3, r3, lr, lsl #2 -#endif +1: bl __cpu_suspend_save adr lr, BSYM(cpu_suspend_abort) ldmfd sp!, {r0, pc} @ call suspend fn @@ -81,15 +130,23 @@ ENDPROC(cpu_resume_after_mmu) .data .align ENTRY(cpu_resume) -#ifdef CONFIG_SMP - adr r0, sleep_save_sp - ALT_SMP(mrc p15, 0, r1, c0, c0, 5) - ALT_UP(mov r1, #0) - and r1, r1, #15 - ldr r0, [r0, r1, lsl #2] @ stack phys addr -#else - ldr r0, sleep_save_sp @ stack phys addr -#endif + mov r1, #0 + ALT_SMP(mrc p15, 0, r0, c0, c0, 5) + ALT_UP_B(1f) + adr r2, mpidr_hash_ptr + ldr r3, [r2] + add r2, r2, r3 @ r2 = struct mpidr_hash phys address + /* + * This ldmia relies on the memory layout of the mpidr_hash + * struct mpidr_hash. 
+ */ + ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts + compute_mpidr_hash r1, r4, r5, r6, r0, r3 +1: + adr r0, _sleep_save_sp + ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] + ldr r0, [r0, r1, lsl #2] + setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off @ load phys pgd, stack, resume fn ARM( ldmia r0!, {r1, sp, pc} ) @@ -98,7 +155,11 @@ THUMB( mov sp, r2 ) THUMB( bx r3 ) ENDPROC(cpu_resume) -sleep_save_sp: - .rept CONFIG_NR_CPUS - .long 0 @ preserve stack phys ptr here - .endr + .align 2 +mpidr_hash_ptr: + .long mpidr_hash - . @ mpidr_hash struct offset + + .type sleep_save_sp, #object +ENTRY(sleep_save_sp) +_sleep_save_sp: + .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5919eb451bb9..c5fb5469054b 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -45,6 +45,7 @@ #include <asm/smp_plat.h> #include <asm/virt.h> #include <asm/mach/arch.h> +#include <asm/mpu.h> /* * as from 2.5, kernels no longer have an init_tasks structure @@ -78,6 +79,13 @@ void __init smp_set_ops(struct smp_operations *ops) smp_ops = *ops; }; +static unsigned long get_arch_pgd(pgd_t *pgd) +{ + phys_addr_t pgdir = virt_to_phys(pgd); + BUG_ON(pgdir & ARCH_PGD_MASK); + return pgdir >> ARCH_PGD_SHIFT; +} + int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) { int ret; @@ -87,8 +95,14 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) * its stack and the page tables. */ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; - secondary_data.pgdir = virt_to_phys(idmap_pgd); - secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); +#ifdef CONFIG_ARM_MPU + secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr; +#endif + +#ifdef CONFIG_MMU + secondary_data.pgdir = get_arch_pgd(idmap_pgd); + secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir); +#endif __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); @@ -112,9 +126,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) pr_err("CPU%u: failed to boot: %d\n", cpu, ret); } - secondary_data.stack = NULL; - secondary_data.pgdir = 0; + memset(&secondary_data, 0, sizeof(secondary_data)); return ret; } diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index 9a52a07aa40e..a98b62dca2fa 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void) static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) { - int cpu, this_cpu; + int this_cpu; cpumask_t mask = { CPU_BITS_NONE }; if (!erratum_a15_798181()) @@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) dummy_flush_tlb_a15_erratum(); this_cpu = get_cpu(); - for_each_online_cpu(cpu) { - if (cpu == this_cpu) - continue; - /* - * We only need to send an IPI if the other CPUs are running - * the same ASID as the one being invalidated. There is no - * need for locking around the active_asids check since the - * switch_mm() function has at least one dmb() (as required by - * this workaround) in case a context switch happens on - * another CPU after the condition below. 
- */ - if (atomic64_read(&mm->context.id) == - atomic64_read(&per_cpu(active_asids, cpu))) - cpumask_set_cpu(cpu, &mask); - } + a15_erratum_get_cpumask(this_cpu, mm, &mask); smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); put_cpu(); } diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index c59c97ea8268..41cf3cbf756d 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -1,15 +1,54 @@ #include <linux/init.h> +#include <linux/slab.h> +#include <asm/cacheflush.h> #include <asm/idmap.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/memory.h> +#include <asm/smp_plat.h> #include <asm/suspend.h> #include <asm/tlbflush.h> extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); extern void cpu_resume_mmu(void); +#ifdef CONFIG_MMU +/* + * Hide the first two arguments to __cpu_suspend - these are an implementation + * detail which platform code shouldn't have to know about. + */ +int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) +{ + struct mm_struct *mm = current->active_mm; + int ret; + + if (!idmap_pgd) + return -EINVAL; + + /* + * Provide a temporary page table with an identity mapping for + * the MMU-enable code, required for resuming. On successful + * resume (indicated by a zero return code), we need to switch + * back to the correct page tables. + */ + ret = __cpu_suspend(arg, fn); + if (ret == 0) { + cpu_switch_mm(mm->pgd, mm); + local_flush_bp_all(); + local_flush_tlb_all(); + } + + return ret; +} +#else +int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) +{ + return __cpu_suspend(arg, fn); +} +#define idmap_pgd NULL +#endif + /* * This is called by __cpu_suspend() to save the state, and do whatever * flushing is required to ensure that when the CPU goes to sleep we have @@ -47,30 +86,19 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr) virt_to_phys(save_ptr) + sizeof(*save_ptr)); } -/* - * Hide the first two arguments to __cpu_suspend - these are an implementation - * detail which platform code shouldn't have to know about. - */ -int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) -{ - struct mm_struct *mm = current->active_mm; - int ret; - - if (!idmap_pgd) - return -EINVAL; +extern struct sleep_save_sp sleep_save_sp; - /* - * Provide a temporary page table with an identity mapping for - * the MMU-enable code, required for resuming. On successful - * resume (indicated by a zero return code), we need to switch - * back to the correct page tables. 
- */ - ret = __cpu_suspend(arg, fn); - if (ret == 0) { - cpu_switch_mm(mm->pgd, mm); - local_flush_bp_all(); - local_flush_tlb_all(); - } +static int cpu_suspend_alloc_sp(void) +{ + void *ctx_ptr; + /* ctx_ptr is an array of physical addresses */ + ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL); - return ret; + if (WARN_ON(!ctx_ptr)) + return -ENOMEM; + sleep_save_sp.save_ptr_stash = ctx_ptr; + sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); + sync_cache_w(&sleep_save_sp); + return 0; } +early_initcall(cpu_suspend_alloc_sp); diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index abff4e9aaee0..98aee3258398 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -24,9 +24,9 @@ #include <linux/timer.h> #include <linux/clocksource.h> #include <linux/irq.h> +#include <linux/sched_clock.h> #include <asm/thread_info.h> -#include <asm/sched_clock.h> #include <asm/stacktrace.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> @@ -120,6 +120,4 @@ void __init time_init(void) machine_desc->init_time(); else clocksource_of_init(); - - sched_clock_postinit(); } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 486e12a0f26a..cab094c234ee 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -581,7 +581,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) return regs->ARM_r0; case NR(set_tls): - thread->tp_value = regs->ARM_r0; + thread->tp_value[0] = regs->ARM_r0; if (tls_emu) return 0; if (has_tls_reg) { @@ -699,7 +699,7 @@ static int get_tp_trap(struct pt_regs *regs, unsigned int instr) int reg = (instr >> 12) & 15; if (reg == 15) return 1; - regs->uregs[reg] = current_thread_info()->tp_value; + regs->uregs[reg] = current_thread_info()->tp_value[0]; regs->ARM_pc += 4; return 0; } diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig index 370e1a8af6ac..ebf5015508b5 100644 --- a/arch/arm/kvm/Kconfig +++ b/arch/arm/kvm/Kconfig @@ -41,9 +41,9 @@ config KVM_ARM_HOST Provides host support for ARM processors. config KVM_ARM_MAX_VCPUS - int "Number maximum supported virtual CPUs per VM" if KVM_ARM_HOST - default 4 if KVM_ARM_HOST - default 0 + int "Number maximum supported virtual CPUs per VM" + depends on KVM_ARM_HOST + default 4 help Static number of max supported virtual CPUs per VM. @@ -67,6 +67,4 @@ config KVM_ARM_TIMER ---help--- Adds support for the Architected Timers in virtual machines -source drivers/virtio/Kconfig - endif # VIRTUALIZATION diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index 53c5ed83d16f..d99bee4950e5 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile @@ -14,10 +14,11 @@ CFLAGS_mmu.o := -I. 
AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) -kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) +KVM := ../../../virt/kvm +kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o obj-y += kvm-arm.o init.o interrupts.o obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o -obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o -obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o +obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o +obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index ef1703b9587b..741f66a2edbd 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -800,8 +800,8 @@ long kvm_arch_vm_ioctl(struct file *filp, static void cpu_init_hyp_mode(void *dummy) { - unsigned long long boot_pgd_ptr; - unsigned long long pgd_ptr; + phys_addr_t boot_pgd_ptr; + phys_addr_t pgd_ptr; unsigned long hyp_stack_ptr; unsigned long stack_page; unsigned long vector_ptr; @@ -809,8 +809,8 @@ static void cpu_init_hyp_mode(void *dummy) /* Switch from the HYP stub to our own HYP init vector */ __hyp_set_vectors(kvm_get_idmap_vector()); - boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr(); - pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); + boot_pgd_ptr = kvm_mmu_get_boot_httbr(); + pgd_ptr = kvm_mmu_get_httbr(); stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); hyp_stack_ptr = stack_page + PAGE_SIZE; vector_ptr = (unsigned long)__kvm_hyp_vector; diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 8eea97be1ed5..4a5199070430 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -180,6 +180,10 @@ static const struct coproc_reg cp15_regs[] = { NULL, reset_unknown, c6_DFAR }, { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, NULL, reset_unknown, c6_IFAR }, + + /* PAR swapped by interrupt.S */ + { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, + /* * DC{C,I,CI}SW operations: */ diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 3d74a0be47db..df4c82d47ad7 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -52,9 +52,6 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - if (kvm_psci_call(vcpu)) - return 1; - kvm_inject_undefined(vcpu); return 1; } diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index f7793df62f58..16cd4ba5d7fd 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -49,6 +49,7 @@ __kvm_hyp_code_start: ENTRY(__kvm_tlb_flush_vmid_ipa) push {r2, r3} + dsb ishst add r0, r0, #KVM_VTTBR ldrd r2, r3, [r0] mcrr p15, 6, r2, r3, c2 @ Write VTTBR @@ -291,6 +292,7 @@ THUMB( orr r2, r2, #PSR_T_BIT ) ldr r2, =BSYM(panic) msr ELR_hyp, r2 ldr r0, =\panic_str + clrex @ Clear exclusive monitor eret .endm @@ -414,6 +416,10 @@ guest_trap: mrcne p15, 4, r2, c6, c0, 4 @ HPFAR bne 3f + /* Preserve PAR */ + mrrc p15, 0, r0, r1, c7 @ PAR + push {r0, r1} + /* Resolve IPA using the xFAR */ mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR isb @@ -424,13 +430,20 @@ guest_trap: lsl r2, r2, #4 orr r2, r2, r1, lsl #24 + /* Restore PAR */ + pop {r0, r1} + mcrr p15, 0, r0, r1, c7 @ PAR + 3: load_vcpu @ Load VCPU pointer to r0 str r2, [r0, #VCPU_HPFAR] 1: mov r1, #ARM_EXCEPTION_HVC b __kvm_vcpu_return -4: pop {r0, r1, r2} @ Failed translation, return to guest +4: pop {r0, r1} @ Failed translation, return to guest + mcrr p15, 0, r0, r1, c7 @ PAR + clrex + pop {r0, 
r1, r2} eret /* @@ -456,6 +469,7 @@ switch_to_guest_vfp: pop {r3-r7} pop {r0-r2} + clrex eret #endif diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 3c8f2f0b4c5e..6f18695a09cb 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -302,11 +302,14 @@ vcpu .req r0 @ vcpu pointer always in r0 .endif mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL + mrrc p15, 0, r4, r5, c7 @ PAR .if \store_to_vcpu == 0 - push {r2} + push {r2,r4-r5} .else str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] + add r12, vcpu, #CP15_OFFSET(c7_PAR) + strd r4, r5, [r12] .endif .endm @@ -319,12 +322,15 @@ vcpu .req r0 @ vcpu pointer always in r0 */ .macro write_cp15_state read_from_vcpu .if \read_from_vcpu == 0 - pop {r2} + pop {r2,r4-r5} .else ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] + add r12, vcpu, #CP15_OFFSET(c7_PAR) + ldrd r4, r5, [r12] .endif mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL + mcrr p15, 0, r4, r5, c7 @ PAR .if \read_from_vcpu == 0 pop {r2-r12} @@ -497,6 +503,10 @@ vcpu .req r0 @ vcpu pointer always in r0 add r5, vcpu, r4 strd r2, r3, [r5] + @ Ensure host CNTVCT == CNTPCT + mov r2, #0 + mcrr p15, 4, r2, r2, c14 @ CNTVOFF + 1: #endif @ Allow physical timer/counter access for the host diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 72a12f2171b2..b8e06b7a2833 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c @@ -86,12 +86,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, sign_extend = kvm_vcpu_dabt_issext(vcpu); rt = kvm_vcpu_dabt_get_rd(vcpu); - if (kvm_vcpu_reg_is_pc(vcpu, rt)) { - /* IO memory trying to read/write pc */ - kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); - return 1; - } - mmio->is_write = is_write; mmio->phys_addr = fault_ipa; mmio->len = len; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 84ba67b982c0..ca6bea4859b4 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -382,9 +382,6 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) if (!pgd) return -ENOMEM; - /* stage-2 pgd must be aligned to its size */ - VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); - memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); kvm_clean_pgd(pgd); kvm->arch.pgd = pgd; diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c index 7ee5bb7a3667..86a693a02ba3 100644 --- a/arch/arm/kvm/psci.c +++ b/arch/arm/kvm/psci.c @@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) * kvm_psci_call - handle PSCI call if r0 value is in range * @vcpu: Pointer to the VCPU struct * - * Handle PSCI calls from guests through traps from HVC or SMC instructions. + * Handle PSCI calls from guests through traps from HVC instructions. 
* The calling convention is similar to SMC calls to the secure world where * the function number is placed in r0 and this function returns true if the * function number specified in r0 is withing the PSCI range, and false diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index b80256b554cd..b7840e7aa452 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -27,6 +27,8 @@ #include <asm/kvm_arm.h> #include <asm/kvm_coproc.h> +#include <kvm/arm_arch_timer.h> + /****************************************************************************** * Cortex-A15 Reset Values */ @@ -37,6 +39,11 @@ static struct kvm_regs a15_regs_reset = { .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, }; +static const struct kvm_irq_level a15_vtimer_irq = { + .irq = 27, + .level = 1, +}; + /******************************************************************************* * Exported reset function @@ -52,6 +59,7 @@ static struct kvm_regs a15_regs_reset = { int kvm_reset_vcpu(struct kvm_vcpu *vcpu) { struct kvm_regs *cpu_reset; + const struct kvm_irq_level *cpu_vtimer_irq; switch (vcpu->arch.target) { case KVM_ARM_TARGET_CORTEX_A15: @@ -59,6 +67,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) return -EINVAL; cpu_reset = &a15_regs_reset; vcpu->arch.midr = read_cpuid_id(); + cpu_vtimer_irq = &a15_vtimer_irq; break; default: return -ENODEV; @@ -70,5 +79,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); + /* Reset arch_timer context */ + kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); + return 0; } diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index 9eb574397ee1..4aad93d54d6f 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -11,6 +11,7 @@ */ #include <linux/module.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/mach/arch.h> @@ -304,7 +305,7 @@ static void at91rm9200_idle(void) at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); } -static void at91rm9200_restart(char mode, const char *cmd) +static void at91rm9200_restart(enum reboot_mode reboot_mode, const char *cmd) { /* * Perform a hardware reset with the use of the Watchdog timer. 
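The at91rm9200_restart hunk above shows the API change that runs through the rest of this series: arm_pm_restart and the per-board restart callbacks now take an enum reboot_mode from <linux/reboot.h> instead of a single mode character. A minimal standalone sketch of a callback written against the new shape follows; the enum definition and the example_board_restart name are stand-ins for illustration, not part of this patch.

#include <stdio.h>

/* Stand-in mirroring the shape of the new <linux/reboot.h> reboot modes. */
enum reboot_mode {
	REBOOT_COLD = 0,
	REBOOT_WARM,
	REBOOT_HARD,
	REBOOT_SOFT,
	REBOOT_GPIO,
};

/* New-style restart hook: a reboot mode plus an optional command string. */
static void example_board_restart(enum reboot_mode mode, const char *cmd)
{
	if (mode == REBOOT_HARD)
		printf("hard reset requested, cmd=%s\n", cmd ? cmd : "");
	else
		printf("warm/soft reset requested, cmd=%s\n", cmd ? cmd : "");
}

/* Hook pointer with the updated signature, as arm_pm_restart now has. */
static void (*restart_hook)(enum reboot_mode, const char *) = example_board_restart;

int main(void)
{
	restart_hook(REBOOT_HARD, "");
	return 0;
}

In the kernel such a callback is wired up by assigning it to arm_pm_restart (or the machine descriptor's restart field), which is exactly what the at91, bcm2835, clps711x and cns3xxx hunks in this diff convert to the new signature.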
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h index f6de36aefe85..dc6e2f5f804d 100644 --- a/arch/arm/mach-at91/generic.h +++ b/arch/arm/mach-at91/generic.h @@ -10,6 +10,7 @@ #include <linux/clkdev.h> #include <linux/of.h> +#include <linux/reboot.h> /* Map io */ extern void __init at91_map_io(void); @@ -60,8 +61,8 @@ extern void at91sam9_idle(void); /* reset */ extern void at91_ioremap_rstc(u32 base_addr); -extern void at91sam9_alt_restart(char, const char *); -extern void at91sam9g45_restart(char, const char *); +extern void at91sam9_alt_restart(enum reboot_mode, const char *); +extern void at91sam9g45_restart(enum reboot_mode, const char *); /* shutdown */ extern void at91_ioremap_shdwc(u32 base_addr); diff --git a/arch/arm/mach-bcm2835/bcm2835.c b/arch/arm/mach-bcm2835/bcm2835.c index 740fa9ebe249..40686d7ef500 100644 --- a/arch/arm/mach-bcm2835/bcm2835.c +++ b/arch/arm/mach-bcm2835/bcm2835.c @@ -53,7 +53,7 @@ static void bcm2835_setup_restart(void) WARN(!wdt_regs, "failed to remap watchdog regs"); } -static void bcm2835_restart(char mode, const char *cmd) +static void bcm2835_restart(enum reboot_mode mode, const char *cmd) { u32 val; @@ -91,7 +91,7 @@ static void bcm2835_power_off(void) writel_relaxed(val, wdt_regs + PM_RSTS); /* Continue with normal reset mechanism */ - bcm2835_restart(0, ""); + bcm2835_restart(REBOOT_HARD, ""); } static struct map_desc io_map __initdata = { diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c index f6d1746366d4..4ca2f3ca2de4 100644 --- a/arch/arm/mach-clps711x/common.c +++ b/arch/arm/mach-clps711x/common.c @@ -384,7 +384,7 @@ void __init clps711x_timer_init(void) setup_irq(IRQ_TC2OI, &clps711x_timer_irq); } -void clps711x_restart(char mode, const char *cmd) +void clps711x_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0); } diff --git a/arch/arm/mach-clps711x/common.h b/arch/arm/mach-clps711x/common.h index 2a22f4c6cc75..9a6767bfdc47 100644 --- a/arch/arm/mach-clps711x/common.h +++ b/arch/arm/mach-clps711x/common.h @@ -4,6 +4,8 @@ * Common bits. 
*/ +#include <linux/reboot.h> + #define CLPS711X_NR_IRQS (33) #define CLPS711X_NR_GPIO (4 * 8 + 3) #define CLPS711X_GPIO(prt, bit) ((prt) * 8 + (bit)) @@ -12,5 +14,5 @@ extern void clps711x_map_io(void); extern void clps711x_init_irq(void); extern void clps711x_timer_init(void); extern void clps711x_handle_irq(struct pt_regs *regs); -extern void clps711x_restart(char mode, const char *cmd); +extern void clps711x_restart(enum reboot_mode mode, const char *cmd); extern void clps711x_init_early(void); diff --git a/arch/arm/mach-cns3xxx/core.h b/arch/arm/mach-cns3xxx/core.h index b23b17b4da10..5218b6198dc2 100644 --- a/arch/arm/mach-cns3xxx/core.h +++ b/arch/arm/mach-cns3xxx/core.h @@ -11,6 +11,8 @@ #ifndef __CNS3XXX_CORE_H #define __CNS3XXX_CORE_H +#include <linux/reboot.h> + extern void cns3xxx_timer_init(void); #ifdef CONFIG_CACHE_L2X0 @@ -22,6 +24,6 @@ static inline void cns3xxx_l2x0_init(void) {} void __init cns3xxx_map_io(void); void __init cns3xxx_init_irq(void); void cns3xxx_power_off(void); -void cns3xxx_restart(char, const char *); +void cns3xxx_restart(enum reboot_mode, const char *); #endif /* __CNS3XXX_CORE_H */ diff --git a/arch/arm/mach-cns3xxx/pm.c b/arch/arm/mach-cns3xxx/pm.c index 79e3d47aad65..fb38c726e987 100644 --- a/arch/arm/mach-cns3xxx/pm.c +++ b/arch/arm/mach-cns3xxx/pm.c @@ -89,7 +89,7 @@ void cns3xxx_pwr_soft_rst(unsigned int block) } EXPORT_SYMBOL(cns3xxx_pwr_soft_rst); -void cns3xxx_restart(char mode, const char *cmd) +void cns3xxx_restart(enum reboot_mode mode, const char *cmd) { /* * To reset, we hit the on-board reset register diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index a075b3e0c5c7..e026b19b23ea 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -40,6 +40,7 @@ config ARCH_DAVINCI_DA850 bool "DA850/OMAP-L138/AM18x based system" select ARCH_DAVINCI_DA8XX select ARCH_HAS_CPUFREQ + select CPU_FREQ_TABLE select CP_INTC config ARCH_DAVINCI_DA8XX diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 8a24b6c6339f..bea6793a7ede 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -985,7 +985,6 @@ static struct regulator_init_data tps65070_regulator_data[] = { static struct touchscreen_init_data tps6507x_touchscreen_data = { .poll_period = 30, /* ms between touch samples */ .min_pressure = 0x30, /* minimum pressure to trigger touch */ - .vref = 0, /* turn off vref when not using A/D */ .vendor = 0, /* /sys/class/input/input?/id/vendor */ .product = 65070, /* /sys/class/input/input?/id/product */ .version = 0x100, /* /sys/class/input/input?/id/version */ diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c index 4d6933848abf..a0d4f6038b60 100644 --- a/arch/arm/mach-davinci/da850.c +++ b/arch/arm/mach-davinci/da850.c @@ -1004,7 +1004,7 @@ static const struct da850_opp da850_opp_96 = { #define OPP(freq) \ { \ - .index = (unsigned int) &da850_opp_##freq, \ + .driver_data = (unsigned int) &da850_opp_##freq, \ .frequency = freq * 1000, \ } @@ -1016,7 +1016,7 @@ static struct cpufreq_frequency_table da850_freq_table[] = { OPP(200), OPP(96), { - .index = 0, + .driver_data = 0, .frequency = CPUFREQ_TABLE_END, }, }; @@ -1044,7 +1044,7 @@ static int da850_set_voltage(unsigned int index) if (!cvdd) return -ENODEV; - opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; + opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; return regulator_set_voltage(cvdd, opp->cvdd_min, 
opp->cvdd_max); } @@ -1125,7 +1125,7 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index) struct pll_data *pll = clk->pll_data; int ret; - opp = (struct da850_opp *) cpufreq_info.freq_table[index].index; + opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; prediv = opp->prediv; mult = opp->mult; postdiv = opp->postdiv; diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index eb254fe861ac..71a46a348761 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -16,6 +16,7 @@ #include <linux/serial_8250.h> #include <linux/ahci_platform.h> #include <linux/clk.h> +#include <linux/reboot.h> #include <mach/cputype.h> #include <mach/common.h> @@ -366,7 +367,7 @@ static struct platform_device da8xx_wdt_device = { .resource = da8xx_watchdog_resources, }; -void da8xx_restart(char mode, const char *cmd) +void da8xx_restart(enum reboot_mode mode, const char *cmd) { struct device *dev; diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c index 90b83d00fe2b..111573c0aad1 100644 --- a/arch/arm/mach-davinci/devices.c +++ b/arch/arm/mach-davinci/devices.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/io.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <linux/platform_data/i2c-davinci.h> @@ -307,7 +308,7 @@ struct platform_device davinci_wdt_device = { .resource = wdt_resources, }; -void davinci_restart(char mode, const char *cmd) +void davinci_restart(enum reboot_mode mode, const char *cmd) { davinci_watchdog_reset(&davinci_wdt_device); } diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h index b124b77c90c5..cce316b92c06 100644 --- a/arch/arm/mach-davinci/include/mach/common.h +++ b/arch/arm/mach-davinci/include/mach/common.h @@ -14,6 +14,7 @@ #include <linux/compiler.h> #include <linux/types.h> +#include <linux/reboot.h> extern void davinci_timer_init(void); @@ -81,7 +82,7 @@ extern struct davinci_soc_info davinci_soc_info; extern void davinci_common_init(struct davinci_soc_info *soc_info); extern void davinci_init_ide(void); -void davinci_restart(char mode, const char *cmd); +void davinci_restart(enum reboot_mode mode, const char *cmd); void davinci_init_late(void); #ifdef CONFIG_DAVINCI_RESET_CLOCKS diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h index 3c797e2272f8..7b41a5e9bc31 100644 --- a/arch/arm/mach-davinci/include/mach/da8xx.h +++ b/arch/arm/mach-davinci/include/mach/da8xx.h @@ -17,6 +17,7 @@ #include <linux/davinci_emac.h> #include <linux/spi/spi.h> #include <linux/platform_data/davinci_asp.h> +#include <linux/reboot.h> #include <linux/videodev2.h> #include <mach/serial.h> @@ -106,7 +107,7 @@ int da850_register_vpif_display (struct vpif_display_config *display_config); int da850_register_vpif_capture (struct vpif_capture_config *capture_config); -void da8xx_restart(char mode, const char *cmd); +void da8xx_restart(enum reboot_mode mode, const char *cmd); void da8xx_rproc_reserve_cma(void); int da8xx_register_rproc(void); diff --git a/arch/arm/mach-davinci/include/mach/tnetv107x.h b/arch/arm/mach-davinci/include/mach/tnetv107x.h index 366e975effa8..16314c64f755 100644 --- a/arch/arm/mach-davinci/include/mach/tnetv107x.h +++ b/arch/arm/mach-davinci/include/mach/tnetv107x.h @@ -35,6 +35,7 @@ #include <linux/serial_8250.h> #include <linux/input/matrix_keypad.h> #include 
<linux/mfd/ti_ssp.h> +#include <linux/reboot.h> #include <linux/platform_data/mmc-davinci.h> #include <linux/platform_data/mtd-davinci.h> @@ -54,7 +55,7 @@ extern struct platform_device tnetv107x_serial_device; extern void tnetv107x_init(void); extern void tnetv107x_devices_init(struct tnetv107x_device_info *); extern void tnetv107x_irq_init(void); -void tnetv107x_restart(char mode, const char *cmd); +void tnetv107x_restart(enum reboot_mode mode, const char *cmd); #endif diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index bad361ec1666..7a55b5c95971 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c @@ -18,8 +18,8 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-davinci/tnetv107x.c b/arch/arm/mach-davinci/tnetv107x.c index 3b2a70d43efa..4545667ecd3c 100644 --- a/arch/arm/mach-davinci/tnetv107x.c +++ b/arch/arm/mach-davinci/tnetv107x.c @@ -19,6 +19,7 @@ #include <linux/io.h> #include <linux/err.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <asm/mach/map.h> @@ -730,7 +731,7 @@ static void tnetv107x_watchdog_reset(struct platform_device *pdev) __raw_writel(1, ®s->kick); } -void tnetv107x_restart(char mode, const char *cmd) +void tnetv107x_restart(enum reboot_mode mode, const char *cmd) { tnetv107x_watchdog_reset(&tnetv107x_wdt_device); } diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c index 2a9443d04d92..00247c771313 100644 --- a/arch/arm/mach-dove/common.c +++ b/arch/arm/mach-dove/common.c @@ -381,7 +381,7 @@ void __init dove_init(void) dove_xor1_init(); } -void dove_restart(char mode, const char *cmd) +void dove_restart(enum reboot_mode mode, const char *cmd) { /* * Enable soft reset to assert RSTOUTn. 
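Several timer files in this series (davinci, imx, mmp, msm, omap1/2, integrator, ixp4xx) swap <asm/sched_clock.h> for <linux/sched_clock.h>. A rough sketch of a caller of that header, assuming the setup_sched_clock() helper it exports at this point in the tree; the counter base, register offset and width below are illustrative only:

	#include <linux/init.h>
	#include <linux/io.h>
	#include <linux/types.h>
	#include <linux/sched_clock.h>

	static void __iomem *example_counter_base;	/* hypothetical free-running counter */

	static u32 notrace example_read_sched_clock(void)
	{
		return readl_relaxed(example_counter_base + 0x10);	/* offset is made up */
	}

	static void __init example_timer_init(unsigned long rate)
	{
		setup_sched_clock(example_read_sched_clock, 32, rate);
	}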
diff --git a/arch/arm/mach-dove/common.h b/arch/arm/mach-dove/common.h index e86347928b67..1d725224d146 100644 --- a/arch/arm/mach-dove/common.h +++ b/arch/arm/mach-dove/common.h @@ -11,6 +11,8 @@ #ifndef __ARCH_DOVE_COMMON_H #define __ARCH_DOVE_COMMON_H +#include <linux/reboot.h> + struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -42,6 +44,6 @@ void dove_spi1_init(void); void dove_i2c_init(void); void dove_sdio0_init(void); void dove_sdio1_init(void); -void dove_restart(char, const char *); +void dove_restart(enum reboot_mode, const char *); #endif diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index b13cc74114db..68ac934d4565 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c @@ -116,7 +116,7 @@ static void __init ebsa110_map_io(void) iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc)); } -static void __iomem *ebsa110_ioremap_caller(unsigned long cookie, size_t size, +static void __iomem *ebsa110_ioremap_caller(phys_addr_t cookie, size_t size, unsigned int flags, void *caller) { return (void __iomem *)cookie; @@ -311,7 +311,7 @@ static int __init ebsa110_init(void) arch_initcall(ebsa110_init); -static void ebsa110_restart(char mode, const char *cmd) +static void ebsa110_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0x80000000); } @@ -321,7 +321,6 @@ MACHINE_START(EBSA110, "EBSA110") .atag_offset = 0x400, .reserve_lp0 = 1, .reserve_lp2 = 1, - .restart_mode = 's', .map_io = ebsa110_map_io, .init_early = ebsa110_init_early, .init_irq = ebsa110_init_irq, diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index c49ed3dc1aea..df8612fbbc9c 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -35,6 +35,7 @@ #include <linux/spi/spi.h> #include <linux/export.h> #include <linux/irqchip/arm-vic.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <linux/platform_data/video-ep93xx.h> @@ -921,7 +922,7 @@ void __init ep93xx_init_devices(void) gpio_led_register_device(-1, &ep93xx_led_data); } -void ep93xx_restart(char mode, const char *cmd) +void ep93xx_restart(enum reboot_mode mode, const char *cmd) { /* * Set then clear the SWRST bit to initiate a software reset diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h index a14e1b37beff..e256e0baec2e 100644 --- a/arch/arm/mach-ep93xx/include/mach/platform.h +++ b/arch/arm/mach-ep93xx/include/mach/platform.h @@ -4,6 +4,8 @@ #ifndef __ASSEMBLY__ +#include <linux/reboot.h> + struct i2c_gpio_platform_data; struct i2c_board_info; struct spi_board_info; @@ -55,7 +57,7 @@ void ep93xx_ide_release_gpio(struct platform_device *pdev); void ep93xx_init_devices(void); extern void ep93xx_timer_init(void); -void ep93xx_restart(char, const char *); +void ep93xx_restart(enum reboot_mode, const char *); void ep93xx_init_late(void); #ifdef CONFIG_CRUNCH diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index 2d503b3684c4..f5f65b58181e 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -93,7 +93,7 @@ config SOC_EXYNOS5440 default y depends on ARCH_EXYNOS5 select ARCH_HAS_OPP - select ARM_ARCH_TIMER + select HAVE_ARM_ARCH_TIMER select AUTO_ZRELADDR select MIGHT_HAVE_PCI select PCI_DOMAINS if PCI diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index 2c655db4b78e..164685bd25c8 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -285,12 +285,12 @@ static struct map_desc 
exynos5440_iodesc0[] __initdata = { }, }; -void exynos4_restart(char mode, const char *cmd) +void exynos4_restart(enum reboot_mode mode, const char *cmd) { __raw_writel(0x1, S5P_SWRESET); } -void exynos5_restart(char mode, const char *cmd) +void exynos5_restart(enum reboot_mode mode, const char *cmd) { struct device_node *np; u32 val; diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index 38d45fd23be4..3e156bcddcb4 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h @@ -12,6 +12,7 @@ #ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H #define __ARCH_ARM_MACH_EXYNOS_COMMON_H +#include <linux/reboot.h> #include <linux/of.h> void mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1); @@ -20,8 +21,8 @@ extern unsigned long xxti_f, xusbxti_f; struct map_desc; void exynos_init_io(void); -void exynos4_restart(char mode, const char *cmd); -void exynos5_restart(char mode, const char *cmd); +void exynos4_restart(enum reboot_mode mode, const char *cmd); +void exynos5_restart(enum reboot_mode mode, const char *cmd); void exynos_init_late(void); /* ToDo: remove these after migrating legacy exynos4 platforms to dt */ diff --git a/arch/arm/mach-footbridge/cats-hw.c b/arch/arm/mach-footbridge/cats-hw.c index 6987a09ec219..9669cc0b6318 100644 --- a/arch/arm/mach-footbridge/cats-hw.c +++ b/arch/arm/mach-footbridge/cats-hw.c @@ -86,7 +86,7 @@ fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi) MACHINE_START(CATS, "Chalice-CATS") /* Maintainer: Philip Blundell */ .atag_offset = 0x100, - .restart_mode = 's', + .reboot_mode = REBOOT_SOFT, .fixup = fixup_cats, .map_io = footbridge_map_io, .init_irq = footbridge_init_irq, diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index a42b369bc439..2739ca2c1334 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -198,9 +198,9 @@ void __init footbridge_map_io(void) } } -void footbridge_restart(char mode, const char *cmd) +void footbridge_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* Jump into the ROM */ soft_restart(0x41000000); } else { diff --git a/arch/arm/mach-footbridge/common.h b/arch/arm/mach-footbridge/common.h index a846e50a07b8..56607b3a773e 100644 --- a/arch/arm/mach-footbridge/common.h +++ b/arch/arm/mach-footbridge/common.h @@ -1,3 +1,4 @@ +#include <linux/reboot.h> extern void footbridge_timer_init(void); extern void isa_timer_init(void); @@ -8,4 +9,4 @@ extern void footbridge_map_io(void); extern void footbridge_init_irq(void); extern void isa_init_irq(unsigned int irq); -extern void footbridge_restart(char, const char *); +extern void footbridge_restart(enum reboot_mode, const char *); diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index 90ea23fdce4c..1fd2cf097e30 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c @@ -634,9 +634,9 @@ fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi) #endif } -static void netwinder_restart(char mode, const char *cmd) +static void netwinder_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* Jump into the ROM */ soft_restart(0x41000000); } else { diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h index 3f65206a9b92..aea1ec5ab6f8 100644 --- a/arch/arm/mach-highbank/core.h +++ b/arch/arm/mach-highbank/core.h @@ -1,8 +1,10 @@ #ifndef __HIGHBANK_CORE_H 
#define __HIGHBANK_CORE_H +#include <linux/reboot.h> + extern void highbank_set_cpu_jump(int cpu, void *jump_addr); -extern void highbank_restart(char, const char *); +extern void highbank_restart(enum reboot_mode, const char *); extern void __iomem *scu_base_addr; #ifdef CONFIG_PM_SLEEP diff --git a/arch/arm/mach-highbank/system.c b/arch/arm/mach-highbank/system.c index 37d8384dcf19..2df5870b7583 100644 --- a/arch/arm/mach-highbank/system.c +++ b/arch/arm/mach-highbank/system.c @@ -15,13 +15,14 @@ */ #include <linux/io.h> #include <asm/proc-fns.h> +#include <linux/reboot.h> #include "core.h" #include "sysregs.h" -void highbank_restart(char mode, const char *cmd) +void highbank_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 'h') + if (mode == REBOOT_HARD) highbank_set_pwr_hard_reset(); else highbank_set_pwr_soft_reset(); diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index 60661a4b0e24..f54656091a9d 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig @@ -108,7 +108,6 @@ config SOC_IMX25 select ARCH_MXC_IOMUX_V3 select COMMON_CLK select CPU_ARM926T - select HAVE_CAN_FLEXCAN if CAN select MXC_AVIC config SOC_IMX27 @@ -134,7 +133,6 @@ config SOC_IMX35 select ARCH_MXC_IOMUX_V3 select COMMON_CLK select CPU_V6K - select HAVE_CAN_FLEXCAN if CAN select HAVE_EPIT select MXC_AVIC select SMP_ON_UP if SMP @@ -774,7 +772,6 @@ comment "Device tree only" config SOC_IMX53 bool "i.MX53 support" - select HAVE_CAN_FLEXCAN if CAN select HAVE_IMX_SRC select IMX_HAVE_PLATFORM_IMX2_WDT select PINCTRL @@ -797,7 +794,6 @@ config SOC_IMX6Q select CPU_V7 select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if LOCAL_TIMERS - select HAVE_CAN_FLEXCAN if CAN select HAVE_IMX_ANATOP select HAVE_IMX_GPC select HAVE_IMX_MMDC diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index ee78847abf47..cb6c838b63ed 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -11,6 +11,8 @@ #ifndef __ASM_ARCH_MXC_COMMON_H__ #define __ASM_ARCH_MXC_COMMON_H__ +#include <linux/reboot.h> + struct platform_device; struct pt_regs; struct clk; @@ -71,7 +73,7 @@ extern int mx53_clocks_init_dt(void); extern struct platform_device *mxc_register_gpio(char *name, int id, resource_size_t iobase, resource_size_t iosize, int irq, int irq_high); extern void mxc_set_cpu_type(unsigned int type); -extern void mxc_restart(char, const char *); +extern void mxc_restart(enum reboot_mode, const char *); extern void mxc_arch_reset_init(void __iomem *); extern void mxc_arch_reset_init_dt(void); extern int mx53_revision(void); diff --git a/arch/arm/mach-imx/devices-imx25.h b/arch/arm/mach-imx/devices-imx25.h index 0d2922bc575c..769563fdeaa0 100644 --- a/arch/arm/mach-imx/devices-imx25.h +++ b/arch/arm/mach-imx/devices-imx25.h @@ -13,10 +13,10 @@ extern const struct imx_fec_data imx25_fec_data; imx_add_fec(&imx25_fec_data, pdata) extern const struct imx_flexcan_data imx25_flexcan_data[]; -#define imx25_add_flexcan(id, pdata) \ - imx_add_flexcan(&imx25_flexcan_data[id], pdata) -#define imx25_add_flexcan0(pdata) imx25_add_flexcan(0, pdata) -#define imx25_add_flexcan1(pdata) imx25_add_flexcan(1, pdata) +#define imx25_add_flexcan(id) \ + imx_add_flexcan(&imx25_flexcan_data[id]) +#define imx25_add_flexcan0() imx25_add_flexcan(0) +#define imx25_add_flexcan1() imx25_add_flexcan(1) extern const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data; #define imx25_add_fsl_usb2_udc(pdata) \ diff --git a/arch/arm/mach-imx/devices-imx35.h b/arch/arm/mach-imx/devices-imx35.h index 
e2675f1b141c..780d8240281b 100644 --- a/arch/arm/mach-imx/devices-imx35.h +++ b/arch/arm/mach-imx/devices-imx35.h @@ -17,10 +17,10 @@ extern const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data; imx_add_fsl_usb2_udc(&imx35_fsl_usb2_udc_data, pdata) extern const struct imx_flexcan_data imx35_flexcan_data[]; -#define imx35_add_flexcan(id, pdata) \ - imx_add_flexcan(&imx35_flexcan_data[id], pdata) -#define imx35_add_flexcan0(pdata) imx35_add_flexcan(0, pdata) -#define imx35_add_flexcan1(pdata) imx35_add_flexcan(1, pdata) +#define imx35_add_flexcan(id) \ + imx_add_flexcan(&imx35_flexcan_data[id]) +#define imx35_add_flexcan0() imx35_add_flexcan(0) +#define imx35_add_flexcan1() imx35_add_flexcan(1) extern const struct imx_imx2_wdt_data imx35_imx2_wdt_data; #define imx35_add_imx2_wdt() \ diff --git a/arch/arm/mach-imx/devices/Kconfig b/arch/arm/mach-imx/devices/Kconfig index 3dd2b1b041d1..68c74fb0373c 100644 --- a/arch/arm/mach-imx/devices/Kconfig +++ b/arch/arm/mach-imx/devices/Kconfig @@ -4,7 +4,6 @@ config IMX_HAVE_PLATFORM_FEC config IMX_HAVE_PLATFORM_FLEXCAN bool - select HAVE_CAN_FLEXCAN if CAN config IMX_HAVE_PLATFORM_FSL_USB2_UDC bool diff --git a/arch/arm/mach-imx/devices/devices-common.h b/arch/arm/mach-imx/devices/devices-common.h index 453e20bc2657..c13b76b9f6b3 100644 --- a/arch/arm/mach-imx/devices/devices-common.h +++ b/arch/arm/mach-imx/devices/devices-common.h @@ -50,7 +50,6 @@ struct platform_device *__init imx_add_fec( const struct imx_fec_data *data, const struct fec_platform_data *pdata); -#include <linux/can/platform/flexcan.h> struct imx_flexcan_data { int id; resource_size_t iobase; @@ -58,8 +57,7 @@ struct imx_flexcan_data { resource_size_t irq; }; struct platform_device *__init imx_add_flexcan( - const struct imx_flexcan_data *data, - const struct flexcan_platform_data *pdata); + const struct imx_flexcan_data *data); #include <linux/fsl_devices.h> struct imx_fsl_usb2_udc_data { diff --git a/arch/arm/mach-imx/devices/platform-flexcan.c b/arch/arm/mach-imx/devices/platform-flexcan.c index 1078bf0a94ef..55d61eaf63c6 100644 --- a/arch/arm/mach-imx/devices/platform-flexcan.c +++ b/arch/arm/mach-imx/devices/platform-flexcan.c @@ -38,8 +38,7 @@ const struct imx_flexcan_data imx35_flexcan_data[] __initconst = { #endif /* ifdef CONFIG_SOC_IMX35 */ struct platform_device *__init imx_add_flexcan( - const struct imx_flexcan_data *data, - const struct flexcan_platform_data *pdata) + const struct imx_flexcan_data *data) { struct resource res[] = { { @@ -54,5 +53,5 @@ struct platform_device *__init imx_add_flexcan( }; return imx_add_platform_device("flexcan", data->id, - res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); + res, ARRAY_SIZE(res), NULL, 0); } diff --git a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c index e2b70f4c1a2c..e77cc3af6db2 100644 --- a/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c +++ b/arch/arm/mach-imx/eukrea_mbimxsd25-baseboard.c @@ -279,7 +279,7 @@ void __init eukrea_mbimxsd25_baseboard_init(void) imx25_add_imx_fb(&eukrea_mximxsd_fb_pdata); imx25_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata); - imx25_add_flexcan1(NULL); + imx25_add_flexcan1(); imx25_add_sdhci_esdhc_imx(0, &sd1_pdata); gpio_request(GPIO_LED1, "LED1"); diff --git a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c index 5a2d5ef12dd5..14d6c8249b76 100644 --- a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c +++ b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c @@ -287,7 +287,7 @@ void __init 
eukrea_mbimxsd35_baseboard_init(void) imx35_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata); - imx35_add_flexcan1(NULL); + imx35_add_flexcan1(); imx35_add_sdhci_esdhc_imx(0, &sd1_pdata); gpio_request(GPIO_LED1, "LED1"); diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index f5965220a4d8..7be13f8e69a0 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ -27,6 +27,7 @@ #include <linux/of_platform.h> #include <linux/opp.h> #include <linux/phy.h> +#include <linux/reboot.h> #include <linux/regmap.h> #include <linux/micrel_phy.h> #include <linux/mfd/syscon.h> @@ -67,7 +68,7 @@ static void __init imx6q_init_revision(void) mxc_set_cpu_type(rev >> 16 & 0xff); } -static void imx6q_restart(char mode, const char *cmd) +static void imx6q_restart(enum reboot_mode mode, const char *cmd) { struct device_node *np; void __iomem *wdog_base; diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c index 8bcda688a006..13490c203050 100644 --- a/arch/arm/mach-imx/mach-mx25_3ds.c +++ b/arch/arm/mach-imx/mach-mx25_3ds.c @@ -249,7 +249,7 @@ static void __init mx25pdk_init(void) imx25_add_imx_i2c0(&mx25_3ds_i2c0_data); gpio_request_one(MX25PDK_CAN_PWDN, GPIOF_OUT_INIT_LOW, "can-pwdn"); - imx25_add_flexcan0(NULL); + imx25_add_flexcan0(); } static void __init mx25pdk_timer_init(void) diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index dae4cd7be040..6f424eced181 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c @@ -268,10 +268,11 @@ static struct mc13xxx_led_platform_data moboard_led[] = { static struct mc13xxx_leds_platform_data moboard_leds = { .num_leds = ARRAY_SIZE(moboard_led), .led = moboard_led, - .flags = MC13783_LED_SLEWLIMTC, - .abmode = MC13783_LED_AB_DISABLED, - .tc1_period = MC13783_LED_PERIOD_10MS, - .tc2_period = MC13783_LED_PERIOD_10MS, + .led_control[0] = MC13783_LED_C0_ENABLE | MC13783_LED_C0_ABMODE(0), + .led_control[1] = MC13783_LED_C1_SLEWLIM, + .led_control[2] = MC13783_LED_C2_SLEWLIM, + .led_control[3] = MC13783_LED_C3_PERIOD(0), + .led_control[4] = MC13783_LED_C3_PERIOD(0), }; static struct mc13xxx_buttons_platform_data moboard_buttons = { diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c index 8ed533f0f8ca..b726cb1c5fdd 100644 --- a/arch/arm/mach-imx/mach-pcm043.c +++ b/arch/arm/mach-imx/mach-pcm043.c @@ -385,7 +385,7 @@ static void __init pcm043_init(void) if (!otg_mode_host) imx35_add_fsl_usb2_udc(&otg_device_pdata); - imx35_add_flexcan1(NULL); + imx35_add_flexcan1(); imx35_add_sdhci_esdhc_imx(0, &sd1_pdata); } diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c index 8f0f60697f55..0884ca90d15a 100644 --- a/arch/arm/mach-imx/mm-imx3.c +++ b/arch/arm/mach-imx/mm-imx3.c @@ -65,7 +65,7 @@ static void imx3_idle(void) : "=r" (reg)); } -static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size, +static void __iomem *imx3_ioremap_caller(phys_addr_t phys_addr, size_t size, unsigned int mtype, void *caller) { if (mtype == MT_DEVICE) { diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c index 7cdc79a9657c..6fe81bb4d3c9 100644 --- a/arch/arm/mach-imx/system.c +++ b/arch/arm/mach-imx/system.c @@ -37,7 +37,7 @@ static struct clk *wdog_clk; /* * Reset the system. It is called by machine_restart(). 
*/ -void mxc_restart(char mode, const char *cmd) +void mxc_restart(enum reboot_mode mode, const char *cmd) { unsigned int wcr_enable; diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c index fea91313678b..cd46529e9eaa 100644 --- a/arch/arm/mach-imx/time.c +++ b/arch/arm/mach-imx/time.c @@ -26,8 +26,8 @@ #include <linux/clockchips.h> #include <linux/clk.h> #include <linux/err.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <asm/mach/time.h> #include "common.h" diff --git a/arch/arm/mach-integrator/common.h b/arch/arm/mach-integrator/common.h index 72516658be1e..ad0ac5547b2c 100644 --- a/arch/arm/mach-integrator/common.h +++ b/arch/arm/mach-integrator/common.h @@ -1,7 +1,8 @@ +#include <linux/reboot.h> #include <linux/amba/serial.h> extern struct amba_pl010_data ap_uart_data; void integrator_init_early(void); int integrator_init(bool is_cp); void integrator_reserve(void); -void integrator_restart(char, const char *); +void integrator_restart(enum reboot_mode, const char *); void integrator_init_sysfs(struct device *parent, u32 id); diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 81461d218717..4cdfd7365925 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -124,7 +124,7 @@ void __init integrator_reserve(void) /* * To reset, we hit the on-board reset register in the system FPGA */ -void integrator_restart(char mode, const char *cmd) +void integrator_restart(enum reboot_mode mode, const char *cmd) { cm_control(CM_CTRL_RESET, CM_CTRL_RESET); } diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index a5b15c4e8def..d9e95e612fcb 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -41,6 +41,7 @@ #include <linux/stat.h> #include <linux/sys_soc.h> #include <linux/termios.h> +#include <linux/sched_clock.h> #include <mach/hardware.h> #include <mach/platform.h> @@ -48,7 +49,6 @@ #include <asm/setup.h> #include <asm/param.h> /* HZ */ #include <asm/mach-types.h> -#include <asm/sched_clock.h> #include <mach/lm.h> #include <mach/irqs.h> diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h index 7480f58267aa..17b40279e0a4 100644 --- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h +++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h @@ -2,6 +2,9 @@ #define _IOP13XX_HW_H_ #ifndef __ASSEMBLY__ + +#include <linux/reboot.h> + /* The ATU offsets can change based on the strapping */ extern u32 iop13xx_atux_pmmr_offset; extern u32 iop13xx_atue_pmmr_offset; @@ -11,7 +14,7 @@ void iop13xx_map_io(void); void iop13xx_platform_init(void); void iop13xx_add_tpmi_devices(void); void iop13xx_init_irq(void); -void iop13xx_restart(char, const char *); +void iop13xx_restart(enum reboot_mode, const char *); /* CPUID CP6 R0 Page 0 */ static inline int iop13xx_cpu_id(void) diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c index 183dc8b5511b..faaf7d4482c5 100644 --- a/arch/arm/mach-iop13xx/io.c +++ b/arch/arm/mach-iop13xx/io.c @@ -23,7 +23,7 @@ #include "pci.h" -static void __iomem *__iop13xx_ioremap_caller(unsigned long cookie, +static void __iomem *__iop13xx_ioremap_caller(phys_addr_t cookie, size_t size, unsigned int mtype, void *caller) { void __iomem * retval; diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 3181f61ea63e..96e6c7a6793b 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -469,7 
+469,6 @@ void __init iop13xx_platform_init(void) dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); - dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_1: @@ -479,7 +478,6 @@ void __init iop13xx_platform_init(void) dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); - dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_2: @@ -489,7 +487,6 @@ void __init iop13xx_platform_init(void) dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); - dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_PQ, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); @@ -597,7 +594,7 @@ __setup("iop13xx_init_adma", iop13xx_init_adma_setup); __setup("iop13xx_init_uart", iop13xx_init_uart_setup); __setup("iop13xx_init_i2c", iop13xx_init_i2c_setup); -void iop13xx_restart(char mode, const char *cmd) +void iop13xx_restart(enum reboot_mode mode, const char *cmd) { /* * Reset the internal bus (warning both cores are reset) diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index ea0984a7449e..069144300b77 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -286,7 +286,7 @@ static void n2100_power_off(void) ; } -static void n2100_restart(char mode, const char *cmd) +static void n2100_restart(enum reboot_mode mode, const char *cmd) { gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW); gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT); diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 6600cff6bd92..5327decde5a0 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -30,6 +30,7 @@ #include <linux/export.h> #include <linux/gpio.h> #include <linux/cpu.h> +#include <linux/sched_clock.h> #include <mach/udc.h> #include <mach/hardware.h> @@ -38,7 +39,6 @@ #include <asm/pgtable.h> #include <asm/page.h> #include <asm/irq.h> -#include <asm/sched_clock.h> #include <asm/system_misc.h> #include <asm/mach/map.h> @@ -531,9 +531,9 @@ static void __init ixp4xx_clockevent_init(void) 0xf, 0xfffffffe); } -void ixp4xx_restart(char mode, const char *cmd) +void ixp4xx_restart(enum reboot_mode mode, const char *cmd) { - if ( 1 && mode == 's') { + if ( 1 && mode == REBOOT_SOFT) { /* Jump into ROM at address 0 */ soft_restart(0); } else { @@ -559,7 +559,7 @@ void ixp4xx_restart(char mode, const char *cmd) * fallback to the default. 
*/ -static void __iomem *ixp4xx_ioremap_caller(unsigned long addr, size_t size, +static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size, unsigned int mtype, void *caller) { if (!is_pci_memory(addr)) diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c index 5d413f8c5700..686ef34c69f5 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-setup.c +++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c @@ -27,6 +27,7 @@ #include <linux/i2c.h> #include <linux/i2c-gpio.h> +#include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h index db5afb69c123..4c4c6a6f4526 100644 --- a/arch/arm/mach-ixp4xx/include/mach/platform.h +++ b/arch/arm/mach-ixp4xx/include/mach/platform.h @@ -13,6 +13,8 @@ #ifndef __ASSEMBLY__ +#include <linux/reboot.h> + #include <asm/types.h> #ifndef __ARMEB__ @@ -123,7 +125,7 @@ extern void ixp4xx_init_early(void); extern void ixp4xx_init_irq(void); extern void ixp4xx_sys_init(void); extern void ixp4xx_timer_init(void); -extern void ixp4xx_restart(char, const char *); +extern void ixp4xx_restart(enum reboot_mode, const char *); extern void ixp4xx_pci_preinit(void); struct pci_sys_data; extern int ixp4xx_setup(int nr, struct pci_sys_data *sys); diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 7c72c725b711..e9238b5567ee 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -20,6 +20,7 @@ #include <linux/mv643xx_i2c.h> #include <linux/timex.h> #include <linux/kexec.h> +#include <linux/reboot.h> #include <net/dsa.h> #include <asm/page.h> #include <asm/mach/map.h> @@ -722,7 +723,7 @@ void __init kirkwood_init(void) #endif } -void kirkwood_restart(char mode, const char *cmd) +void kirkwood_restart(enum reboot_mode mode, const char *cmd) { /* * Enable soft reset to assert RSTOUTn. 
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h index 1c09f3f93fbb..fcf3ba682e24 100644 --- a/arch/arm/mach-kirkwood/common.h +++ b/arch/arm/mach-kirkwood/common.h @@ -11,6 +11,8 @@ #ifndef __ARCH_KIRKWOOD_COMMON_H #define __ARCH_KIRKWOOD_COMMON_H +#include <linux/reboot.h> + struct dsa_platform_data; struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -53,7 +55,7 @@ void kirkwood_audio_init(void); void kirkwood_cpuidle_init(void); void kirkwood_cpufreq_init(void); -void kirkwood_restart(char, const char *); +void kirkwood_restart(enum reboot_mode, const char *); void kirkwood_clk_init(void); /* board init functions for boards not fully converted to fdt */ diff --git a/arch/arm/mach-ks8695/generic.h b/arch/arm/mach-ks8695/generic.h index 6e97ce462d73..43253f8e6de4 100644 --- a/arch/arm/mach-ks8695/generic.h +++ b/arch/arm/mach-ks8695/generic.h @@ -12,5 +12,5 @@ extern __init void ks8695_map_io(void); extern __init void ks8695_init_irq(void); -extern void ks8695_restart(char, const char *); +extern void ks8695_restart(enum reboot_mode, const char *); extern void ks8695_timer_init(void); diff --git a/arch/arm/mach-ks8695/time.c b/arch/arm/mach-ks8695/time.c index c272a3863d5f..426c97662f5b 100644 --- a/arch/arm/mach-ks8695/time.c +++ b/arch/arm/mach-ks8695/time.c @@ -154,11 +154,11 @@ void __init ks8695_timer_init(void) setup_irq(KS8695_IRQ_TIMER1, &ks8695_timer_irq); } -void ks8695_restart(char mode, const char *cmd) +void ks8695_restart(enum reboot_mode reboot_mode, const char *cmd) { unsigned int reg; - if (mode == 's') + if (reboot_mode == REBOOT_SOFT) soft_restart(0); /* disable timer0 */ diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c index 0d4db8c544b5..d7aa54c25c59 100644 --- a/arch/arm/mach-lpc32xx/common.c +++ b/arch/arm/mach-lpc32xx/common.c @@ -207,11 +207,11 @@ void __init lpc32xx_map_io(void) iotable_init(lpc32xx_io_desc, ARRAY_SIZE(lpc32xx_io_desc)); } -void lpc23xx_restart(char mode, const char *cmd) +void lpc23xx_restart(enum reboot_mode mode, const char *cmd) { switch (mode) { - case 's': - case 'h': + case REBOOT_SOFT: + case REBOOT_HARD: lpc32xx_watchdog_reset(); break; diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h index e0b26062a272..1cd8853b2f9b 100644 --- a/arch/arm/mach-lpc32xx/common.h +++ b/arch/arm/mach-lpc32xx/common.h @@ -21,6 +21,7 @@ #include <mach/board.h> #include <linux/platform_device.h> +#include <linux/reboot.h> /* * Other arch specific structures and functions @@ -29,7 +30,7 @@ extern void lpc32xx_timer_init(void); extern void __init lpc32xx_init_irq(void); extern void __init lpc32xx_map_io(void); extern void __init lpc32xx_serial_init(void); -extern void lpc23xx_restart(char, const char *); +extern void lpc23xx_restart(enum reboot_mode, const char *); /* diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index c1cd5a943ab1..e54f87ec2e4a 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c +++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -182,8 +182,8 @@ static void pl08x_put_signal(const struct pl08x_channel_data *cd, int ch) static struct pl08x_platform_data pl08x_pd = { .slave_channels = &pl08x_slave_channels[0], .num_slave_channels = ARRAY_SIZE(pl08x_slave_channels), - .get_signal = pl08x_get_signal, - .put_signal = pl08x_put_signal, + .get_xfer_signal = pl08x_get_signal, + .put_xfer_signal = pl08x_put_signal, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, }; diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c 
index 5b660ec09ef5..0c002099c3a3 100644 --- a/arch/arm/mach-mmp/aspenite.c +++ b/arch/arm/mach-mmp/aspenite.c @@ -210,7 +210,7 @@ struct pxa168fb_mach_info aspenite_lcd_info = { .invert_pixclock = 0, }; -static unsigned int aspenite_matrix_key_map[] = { +static const unsigned int aspenite_matrix_key_map[] = { KEY(0, 6, KEY_UP), /* SW 4 */ KEY(0, 7, KEY_DOWN), /* SW 5 */ KEY(1, 6, KEY_LEFT), /* SW 6 */ @@ -219,11 +219,15 @@ static unsigned int aspenite_matrix_key_map[] = { KEY(4, 7, KEY_ESC), /* SW 9 */ }; +static struct matrix_keymap_data aspenite_matrix_keymap_data = { + .keymap = aspenite_matrix_key_map, + .keymap_size = ARRAY_SIZE(aspenite_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data aspenite_keypad_info __initdata = { .matrix_key_rows = 5, .matrix_key_cols = 8, - .matrix_key_map = aspenite_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(aspenite_matrix_key_map), + .matrix_keymap_data = &aspenite_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-mmp/common.c b/arch/arm/mach-mmp/common.c index 9292b7966e3b..c03b4ab582db 100644 --- a/arch/arm/mach-mmp/common.c +++ b/arch/arm/mach-mmp/common.c @@ -47,7 +47,7 @@ void __init mmp_map_io(void) mmp_chip_id = __raw_readl(MMP_CHIPID); } -void mmp_restart(char mode, const char *cmd) +void mmp_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0); } diff --git a/arch/arm/mach-mmp/common.h b/arch/arm/mach-mmp/common.h index 0bdc50b134ce..991d7e9877de 100644 --- a/arch/arm/mach-mmp/common.h +++ b/arch/arm/mach-mmp/common.h @@ -1,10 +1,11 @@ +#include <linux/reboot.h> #define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x) extern void timer_init(int irq); extern void __init icu_init_irq(void); extern void __init mmp_map_io(void); -extern void mmp_restart(char, const char *); +extern void mmp_restart(enum reboot_mode, const char *); extern void __init pxa168_clk_init(void); extern void __init pxa910_clk_init(void); extern void __init mmp2_clk_init(void); diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h index 7ed1df21ea1c..459c2d03eb5c 100644 --- a/arch/arm/mach-mmp/include/mach/pxa168.h +++ b/arch/arm/mach-mmp/include/mach/pxa168.h @@ -1,9 +1,11 @@ #ifndef __ASM_MACH_PXA168_H #define __ASM_MACH_PXA168_H +#include <linux/reboot.h> + extern void pxa168_timer_init(void); extern void __init pxa168_init_irq(void); -extern void pxa168_restart(char, const char *); +extern void pxa168_restart(enum reboot_mode, const char *); extern void pxa168_clear_keypad_wakeup(void); #include <linux/i2c.h> diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index a30dcf3b7d9e..144e997624c0 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c @@ -172,7 +172,7 @@ int __init pxa168_add_usb_host(struct mv_usb_platform_data *pdata) return platform_device_register(&pxa168_device_usb_host); } -void pxa168_restart(char mode, const char *cmd) +void pxa168_restart(enum reboot_mode mode, const char *cmd) { soft_restart(0xffff0000); } diff --git a/arch/arm/mach-mmp/teton_bga.c b/arch/arm/mach-mmp/teton_bga.c index e4d95b4c6bb2..6aa53fb29d26 100644 --- a/arch/arm/mach-mmp/teton_bga.c +++ b/arch/arm/mach-mmp/teton_bga.c @@ -61,11 +61,15 @@ static unsigned int teton_bga_matrix_key_map[] = { KEY(1, 7, KEY_RIGHT), }; +static struct matrix_keymap_data teton_bga_matrix_keymap_data = { + .keymap = teton_bga_matrix_key_map, + .keymap_size = ARRAY_SIZE(teton_bga_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data teton_bga_keypad_info __initdata = { 
.matrix_key_rows = 2, .matrix_key_cols = 8, - .matrix_key_map = teton_bga_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(teton_bga_matrix_key_map), + .matrix_keymap_data = &teton_bga_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 86a18b3d252e..7ac41e83cfef 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -28,8 +28,8 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <mach/addr-map.h> #include <mach/regs-timers.h> #include <mach/regs-apbc.h> diff --git a/arch/arm/mach-msm/common.h b/arch/arm/mach-msm/common.h index ce8215a269e5..421cf7751a80 100644 --- a/arch/arm/mach-msm/common.h +++ b/arch/arm/mach-msm/common.h @@ -23,7 +23,7 @@ extern void msm_map_msm8x60_io(void); extern void msm_map_msm8960_io(void); extern void msm_map_qsd8x50_io(void); -extern void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size, +extern void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size, unsigned int mtype, void *caller); extern struct smp_operations msm_smp_ops; diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c index efa113e4de86..3dc04ccaf59f 100644 --- a/arch/arm/mach-msm/io.c +++ b/arch/arm/mach-msm/io.c @@ -168,7 +168,7 @@ void __init msm_map_msm7x30_io(void) } #endif /* CONFIG_ARCH_MSM7X30 */ -void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size, +void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size, unsigned int mtype, void *caller) { if (mtype == MT_DEVICE) { diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 284313f3e02c..b6418fd5fe0d 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -23,10 +23,10 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> #include <asm/localtimer.h> -#include <asm/sched_clock.h> #include "common.h" diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c index 749a7f8c4992..75062eff2494 100644 --- a/arch/arm/mach-mv78xx0/common.c +++ b/arch/arm/mach-mv78xx0/common.c @@ -413,7 +413,7 @@ void __init mv78xx0_init(void) clk_init(); } -void mv78xx0_restart(char mode, const char *cmd) +void mv78xx0_restart(enum reboot_mode mode, const char *cmd) { /* * Enable soft reset to assert RSTOUTn. 
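The aspenite and teton_bga hunks above show the keypad conversion: the raw KEY() table is wrapped in a matrix_keymap_data, and the pxa27x keypad platform data points at it through .matrix_keymap_data instead of carrying the map and its size directly. A condensed sketch with invented names:

	#include <linux/input.h>
	#include <linux/input/matrix_keypad.h>
	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const unsigned int example_key_map[] = {
		KEY(0, 0, KEY_ENTER),	/* row 0, column 0 */
		KEY(0, 1, KEY_ESC),
	};

	static struct matrix_keymap_data example_keymap_data = {
		.keymap		= example_key_map,
		.keymap_size	= ARRAY_SIZE(example_key_map),
	};

	/*
	 * The board's pxa27x_keypad_platform_data then carries
	 *	.matrix_keymap_data = &example_keymap_data,
	 * in place of the old .matrix_key_map/.matrix_key_map_size pair.
	 */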
diff --git a/arch/arm/mach-mv78xx0/common.h b/arch/arm/mach-mv78xx0/common.h index 5e9485bad0ac..6889af26077d 100644 --- a/arch/arm/mach-mv78xx0/common.h +++ b/arch/arm/mach-mv78xx0/common.h @@ -11,6 +11,8 @@ #ifndef __ARCH_MV78XX0_COMMON_H #define __ARCH_MV78XX0_COMMON_H +#include <linux/reboot.h> + struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -45,7 +47,7 @@ void mv78xx0_uart1_init(void); void mv78xx0_uart2_init(void); void mv78xx0_uart3_init(void); void mv78xx0_i2c_init(void); -void mv78xx0_restart(char, const char *); +void mv78xx0_restart(enum reboot_mode, const char *); extern void mv78xx0_timer_init(void); diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h index 98defd5e92cd..e366010e1d91 100644 --- a/arch/arm/mach-mvebu/common.h +++ b/arch/arm/mach-mvebu/common.h @@ -17,7 +17,9 @@ #define ARMADA_XP_MAX_CPUS 4 -void mvebu_restart(char mode, const char *cmd); +#include <linux/reboot.h> + +void mvebu_restart(enum reboot_mode mode, const char *cmd); void armada_370_xp_init_irq(void); void armada_370_xp_handle_irq(struct pt_regs *regs); diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c index b8079df8c986..f875124ff4f9 100644 --- a/arch/arm/mach-mvebu/system-controller.c +++ b/arch/arm/mach-mvebu/system-controller.c @@ -26,6 +26,7 @@ #include <linux/init.h> #include <linux/of_address.h> #include <linux/io.h> +#include <linux/reboot.h> static void __iomem *system_controller_base; @@ -63,7 +64,7 @@ static struct of_device_id of_system_controller_table[] = { { /* end of list */ }, }; -void mvebu_restart(char mode, const char *cmd) +void mvebu_restart(enum reboot_mode mode, const char *cmd) { if (!system_controller_base) { pr_err("Cannot restart, system-controller not available: check the device tree\n"); diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig index 616fe0210da1..8cde9e05b5d6 100644 --- a/arch/arm/mach-mxs/Kconfig +++ b/arch/arm/mach-mxs/Kconfig @@ -10,7 +10,6 @@ config SOC_IMX28 select ARM_AMBA select ARM_CPU_SUSPEND if PM select CPU_ARM926T - select HAVE_CAN_FLEXCAN if CAN select PINCTRL_IMX28 config ARCH_MXS diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 7fa611c1b287..4ce27b536dc9 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c @@ -14,12 +14,12 @@ #include <linux/clk/mxs.h> #include <linux/clkdev.h> #include <linux/clocksource.h> -#include <linux/can/platform/flexcan.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/irqchip/mxs.h> +#include <linux/reboot.h> #include <linux/micrel_phy.h> #include <linux/of_address.h> #include <linux/of_platform.h> @@ -76,41 +76,6 @@ static inline void __mxs_togl(u32 mask, void __iomem *reg) __raw_writel(mask, reg + MXS_TOG_ADDR); } -/* - * MX28EVK_FLEXCAN_SWITCH is shared between both flexcan controllers - */ -#define MX28EVK_FLEXCAN_SWITCH MXS_GPIO_NR(2, 13) - -static int flexcan0_en, flexcan1_en; - -static void mx28evk_flexcan_switch(void) -{ - if (flexcan0_en || flexcan1_en) - gpio_set_value(MX28EVK_FLEXCAN_SWITCH, 1); - else - gpio_set_value(MX28EVK_FLEXCAN_SWITCH, 0); -} - -static void mx28evk_flexcan0_switch(int enable) -{ - flexcan0_en = enable; - mx28evk_flexcan_switch(); -} - -static void mx28evk_flexcan1_switch(int enable) -{ - flexcan1_en = enable; - mx28evk_flexcan_switch(); -} - -static struct flexcan_platform_data flexcan_pdata[2]; - -static struct of_dev_auxdata mxs_auxdata_lookup[] __initdata = { - 
OF_DEV_AUXDATA("fsl,imx28-flexcan", 0x80032000, NULL, &flexcan_pdata[0]), - OF_DEV_AUXDATA("fsl,imx28-flexcan", 0x80034000, NULL, &flexcan_pdata[1]), - { /* sentinel */ } -}; - #define OCOTP_WORD_OFFSET 0x20 #define OCOTP_WORD_COUNT 0x20 @@ -270,15 +235,6 @@ static void __init imx28_evk_init(void) mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); } -static void __init imx28_evk_post_init(void) -{ - if (!gpio_request_one(MX28EVK_FLEXCAN_SWITCH, GPIOF_DIR_OUT, - "flexcan-switch")) { - flexcan_pdata[0].transceiver_switch = mx28evk_flexcan0_switch; - flexcan_pdata[1].transceiver_switch = mx28evk_flexcan1_switch; - } -} - static int apx4devkit_phy_fixup(struct phy_device *phy) { phy->dev_flags |= MICREL_PHY_50MHZ_CLK; @@ -484,13 +440,10 @@ static void __init mxs_machine_init(void) crystalfontz_init(); of_platform_populate(NULL, of_default_bus_match_table, - mxs_auxdata_lookup, parent); + NULL, parent); if (of_machine_is_compatible("karo,tx28")) tx28_post_init(); - - if (of_machine_is_compatible("fsl,imx28-evk")) - imx28_evk_post_init(); } #define MX23_CLKCTRL_RESET_OFFSET 0x120 @@ -500,7 +453,7 @@ static void __init mxs_machine_init(void) /* * Reset the system. It is called by machine_restart(). */ -static void mxs_restart(char mode, const char *cmd) +static void mxs_restart(enum reboot_mode mode, const char *cmd) { struct device_node *np; void __iomem *reset_addr; diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c index 1504b68f4c66..db25b0cef3a7 100644 --- a/arch/arm/mach-netx/generic.c +++ b/arch/arm/mach-netx/generic.c @@ -24,6 +24,7 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/irqchip/arm-vic.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <asm/mach/map.h> #include <mach/netx-regs.h> @@ -187,7 +188,7 @@ static int __init netx_init(void) subsys_initcall(netx_init); -void netx_restart(char mode, const char *cmd) +void netx_restart(enum reboot_mode mode, const char *cmd) { writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES, NETX_SYSTEM_RES_CR); diff --git a/arch/arm/mach-netx/generic.h b/arch/arm/mach-netx/generic.h index 768b26bbb42b..bb2ce471cc28 100644 --- a/arch/arm/mach-netx/generic.h +++ b/arch/arm/mach-netx/generic.h @@ -17,8 +17,10 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/reboot.h> + extern void __init netx_map_io(void); extern void __init netx_init_irq(void); -extern void netx_restart(char, const char *); +extern void netx_restart(enum reboot_mode, const char *); extern void netx_timer_init(void); diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c index 2df209ed1a07..13e0df9c11ce 100644 --- a/arch/arm/mach-nomadik/cpu-8815.c +++ b/arch/arm/mach-nomadik/cpu-8815.c @@ -103,7 +103,7 @@ static void __init cpu8815_map_io(void) iotable_init(cpu8815_io_desc, ARRAY_SIZE(cpu8815_io_desc)); } -static void cpu8815_restart(char mode, const char *cmd) +static void cpu8815_restart(enum reboot_mode mode, const char *cmd) { void __iomem *srcbase = ioremap(NOMADIK_SRC_BASE, SZ_4K); diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c index 6c116e1a4b01..4677a9ccb3cb 100644 --- a/arch/arm/mach-omap1/board-voiceblue.c +++ b/arch/arm/mach-omap1/board-voiceblue.c @@ -26,6 +26,7 @@ #include <linux/serial_reg.h> #include <linux/smc91x.h> #include <linux/export.h> +#include <linux/reboot.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -215,7 +216,7 @@ void 
voiceblue_wdt_ping(void) gpio_set_value(0, wdt_gpio_state); } -static void voiceblue_restart(char mode, const char *cmd) +static void voiceblue_restart(enum reboot_mode mode, const char *cmd) { /* * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h index 14f7e9920479..abec019a5281 100644 --- a/arch/arm/mach-omap1/common.h +++ b/arch/arm/mach-omap1/common.h @@ -28,6 +28,7 @@ #include <linux/mtd/mtd.h> #include <linux/i2c-omap.h> +#include <linux/reboot.h> #include <plat/i2c.h> @@ -70,7 +71,7 @@ static inline int omap_serial_wakeup_init(void) void omap1_init_early(void); void omap1_init_irq(void); void omap1_init_late(void); -void omap1_restart(char, const char *); +void omap1_restart(enum reboot_mode, const char *); extern void __init omap_check_revision(void); diff --git a/arch/arm/mach-omap1/reset.c b/arch/arm/mach-omap1/reset.c index 5eebd7e889d0..72bf4bf4a702 100644 --- a/arch/arm/mach-omap1/reset.c +++ b/arch/arm/mach-omap1/reset.c @@ -3,6 +3,7 @@ */ #include <linux/kernel.h> #include <linux/io.h> +#include <linux/reboot.h> #include <mach/hardware.h> @@ -22,7 +23,7 @@ #define OMAP_EXTWARM_RST_SRC_ID_SHIFT 5 -void omap1_restart(char mode, const char *cmd) +void omap1_restart(enum reboot_mode mode, const char *cmd) { /* * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 726ec23d29c7..80603d2fef77 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -43,9 +43,9 @@ #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/io.h> +#include <linux/sched_clock.h> #include <asm/irq.h> -#include <asm/sched_clock.h> #include <mach/hardware.h> #include <asm/mach/irq.h> diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 1fdb46216590..c7b32a966f67 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -4,6 +4,7 @@ config ARCH_OMAP config ARCH_OMAP2PLUS bool "TI OMAP2/3/4/5 SoCs with device tree support" if (ARCH_MULTI_V6 || ARCH_MULTI_V7) select ARCH_HAS_CPUFREQ + select ARCH_HAS_BANDGAP select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_OMAP select ARCH_REQUIRE_GPIOLIB diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c index 88e4fa8af031..1eae96212315 100644 --- a/arch/arm/mach-omap2/am33xx-restart.c +++ b/arch/arm/mach-omap2/am33xx-restart.c @@ -6,6 +6,7 @@ * published by the Free Software Foundation. */ #include <linux/kernel.h> +#include <linux/reboot.h> #include "common.h" #include "prm-regbits-33xx.h" @@ -19,7 +20,7 @@ * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. 
*/ -void am33xx_restart(char mode, const char *cmd) +void am33xx_restart(enum reboot_mode mode, const char *cmd) { /* TODO: Handle mode and cmd if necessary */ diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 72cab3f4f16d..dfcc182ecff9 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -31,6 +31,7 @@ #include <linux/i2c.h> #include <linux/i2c/twl.h> #include <linux/i2c-omap.h> +#include <linux/reboot.h> #include <asm/proc-fns.h> @@ -119,33 +120,33 @@ static inline void omap_soc_device_init(void) #endif #if defined(CONFIG_SOC_OMAP2420) || defined(CONFIG_SOC_OMAP2430) -void omap2xxx_restart(char mode, const char *cmd); +void omap2xxx_restart(enum reboot_mode mode, const char *cmd); #else -static inline void omap2xxx_restart(char mode, const char *cmd) +static inline void omap2xxx_restart(enum reboot_mode mode, const char *cmd) { } #endif #ifdef CONFIG_SOC_AM33XX -void am33xx_restart(char mode, const char *cmd); +void am33xx_restart(enum reboot_mode mode, const char *cmd); #else -static inline void am33xx_restart(char mode, const char *cmd) +static inline void am33xx_restart(enum reboot_mode mode, const char *cmd) { } #endif #ifdef CONFIG_ARCH_OMAP3 -void omap3xxx_restart(char mode, const char *cmd); +void omap3xxx_restart(enum reboot_mode mode, const char *cmd); #else -static inline void omap3xxx_restart(char mode, const char *cmd) +static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd) { } #endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) -void omap44xx_restart(char mode, const char *cmd); +void omap44xx_restart(enum reboot_mode mode, const char *cmd); #else -static inline void omap44xx_restart(char mode, const char *cmd) +static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd) { } #endif diff --git a/arch/arm/mach-omap2/omap2-restart.c b/arch/arm/mach-omap2/omap2-restart.c index 719b716a4494..68423e26399d 100644 --- a/arch/arm/mach-omap2/omap2-restart.c +++ b/arch/arm/mach-omap2/omap2-restart.c @@ -31,7 +31,7 @@ static struct clk *reset_virt_prcm_set_ck, *reset_sys_ck; * Set the DPLL to bypass so that reboot completes successfully. No * return value. */ -void omap2xxx_restart(char mode, const char *cmd) +void omap2xxx_restart(enum reboot_mode mode, const char *cmd) { u32 rate; diff --git a/arch/arm/mach-omap2/omap3-restart.c b/arch/arm/mach-omap2/omap3-restart.c index 923c582189e5..5de2a0c2979d 100644 --- a/arch/arm/mach-omap2/omap3-restart.c +++ b/arch/arm/mach-omap2/omap3-restart.c @@ -12,6 +12,7 @@ */ #include <linux/kernel.h> #include <linux/init.h> +#include <linux/reboot.h> #include "iomap.h" #include "common.h" @@ -28,7 +29,7 @@ * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ -void omap3xxx_restart(char mode, const char *cmd) +void omap3xxx_restart(enum reboot_mode mode, const char *cmd) { omap3_ctrl_write_boot_mode((cmd ? 
(u8)*cmd : 0)); omap3xxx_prm_dpll3_reset(); /* never returns */ diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 38cd3a69cff3..57911430324e 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -23,6 +23,7 @@ #include <linux/export.h> #include <linux/irqchip/arm-gic.h> #include <linux/of_address.h> +#include <linux/reboot.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/map.h> diff --git a/arch/arm/mach-omap2/omap4-restart.c b/arch/arm/mach-omap2/omap4-restart.c index f90e02e11898..41dfd7da8170 100644 --- a/arch/arm/mach-omap2/omap4-restart.c +++ b/arch/arm/mach-omap2/omap4-restart.c @@ -8,6 +8,7 @@ */ #include <linux/types.h> +#include <linux/reboot.h> #include "prminst44xx.h" /** @@ -18,7 +19,7 @@ * Resets the SoC. For @cmd, see the 'reboot' syscall in * kernel/sys.c. No return value. */ -void omap44xx_restart(char mode, const char *cmd) +void omap44xx_restart(enum reboot_mode mode, const char *cmd) { /* XXX Should save 'cmd' into scratchpad for use after reboot */ omap4_prminst_global_warm_sw_reset(); /* never returns */ diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 68be532f8688..5cc92874be7e 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -588,11 +588,6 @@ static int _od_runtime_suspend(struct device *dev) return ret; } -static int _od_runtime_idle(struct device *dev) -{ - return pm_generic_runtime_idle(dev); -} - static int _od_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -648,7 +643,7 @@ static int _od_resume_noirq(struct device *dev) struct dev_pm_domain omap_device_pm_domain = { .ops = { SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, - _od_runtime_idle) + NULL) USE_PLATFORM_PM_SLEEP_OPS .suspend_noirq = _od_suspend_noirq, .resume_noirq = _od_resume_noirq, diff --git a/arch/arm/mach-omap2/smartreflex-class3.c b/arch/arm/mach-omap2/smartreflex-class3.c index aee3c8940a30..7a42e1960c3b 100644 --- a/arch/arm/mach-omap2/smartreflex-class3.c +++ b/arch/arm/mach-omap2/smartreflex-class3.c @@ -26,14 +26,14 @@ static int sr_class3_enable(struct omap_sr *sr) } omap_vp_enable(sr->voltdm); - return sr_enable(sr->voltdm, volt); + return sr_enable(sr, volt); } static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) { - sr_disable_errgen(sr->voltdm); + sr_disable_errgen(sr); omap_vp_disable(sr->voltdm); - sr_disable(sr->voltdm); + sr_disable(sr); if (is_volt_reset) voltdm_reset(sr->voltdm); @@ -42,7 +42,7 @@ static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset) static int sr_class3_configure(struct omap_sr *sr) { - return sr_configure_errgen(sr->voltdm); + return sr_configure_errgen(sr); } /* SR class3 structure */ diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 3bdb0fb02028..29ac667b7a8b 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -41,10 +41,10 @@ #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/platform_data/dmtimer-omap.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> #include <asm/smp_twd.h> -#include <asm/sched_clock.h> #include "omap_hwmod.h" #include "omap_device.h" diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index f8a6db9239bf..b41599f98a8e 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -347,7 +347,7 @@ void __init orion5x_init(void) orion5x_wdt_init(); } 
-void orion5x_restart(char mode, const char *cmd) +void orion5x_restart(enum reboot_mode mode, const char *cmd) { /* * Enable and issue soft reset diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h index cdaa01f3d186..a909afb384fb 100644 --- a/arch/arm/mach-orion5x/common.h +++ b/arch/arm/mach-orion5x/common.h @@ -1,6 +1,8 @@ #ifndef __ARCH_ORION5X_COMMON_H #define __ARCH_ORION5X_COMMON_H +#include <linux/reboot.h> + struct dsa_platform_data; struct mv643xx_eth_platform_data; struct mv_sata_platform_data; @@ -29,7 +31,7 @@ void orion5x_spi_init(void); void orion5x_uart0_init(void); void orion5x_uart1_init(void); void orion5x_xor_init(void); -void orion5x_restart(char, const char *); +void orion5x_restart(enum reboot_mode, const char *); /* * PCIe/PCI functions. diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c index 24f4e14e5893..6234977b5aea 100644 --- a/arch/arm/mach-orion5x/ls-chl-setup.c +++ b/arch/arm/mach-orion5x/ls-chl-setup.c @@ -139,7 +139,7 @@ static struct mv_sata_platform_data lschl_sata_data = { static void lschl_power_off(void) { - orion5x_restart('h', NULL); + orion5x_restart(REBOOT_HARD, NULL); } /***************************************************************************** diff --git a/arch/arm/mach-orion5x/ls_hgl-setup.c b/arch/arm/mach-orion5x/ls_hgl-setup.c index fc653bb41e78..fe04c4b64569 100644 --- a/arch/arm/mach-orion5x/ls_hgl-setup.c +++ b/arch/arm/mach-orion5x/ls_hgl-setup.c @@ -185,7 +185,7 @@ static struct mv_sata_platform_data ls_hgl_sata_data = { static void ls_hgl_power_off(void) { - orion5x_restart('h', NULL); + orion5x_restart(REBOOT_HARD, NULL); } diff --git a/arch/arm/mach-orion5x/lsmini-setup.c b/arch/arm/mach-orion5x/lsmini-setup.c index 18e66e617dc2..ca4dbe973daf 100644 --- a/arch/arm/mach-orion5x/lsmini-setup.c +++ b/arch/arm/mach-orion5x/lsmini-setup.c @@ -185,7 +185,7 @@ static struct mv_sata_platform_data lsmini_sata_data = { static void lsmini_power_off(void) { - orion5x_restart('h', NULL); + orion5x_restart(REBOOT_HARD, NULL); } diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c index b13f51bc35cf..ec79fea82704 100644 --- a/arch/arm/mach-picoxcell/common.c +++ b/arch/arm/mach-picoxcell/common.c @@ -11,6 +11,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> +#include <linux/reboot.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -63,7 +64,7 @@ static const char *picoxcell_dt_match[] = { NULL }; -static void picoxcell_wdt_restart(char mode, const char *cmd) +static void picoxcell_wdt_restart(enum reboot_mode mode, const char *cmd) { /* * Configure the watchdog to reset with the shortest possible timeout diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h index 81135cd88e54..a6304858474a 100644 --- a/arch/arm/mach-prima2/common.h +++ b/arch/arm/mach-prima2/common.h @@ -10,6 +10,8 @@ #define __MACH_PRIMA2_COMMON_H__ #include <linux/init.h> +#include <linux/reboot.h> + #include <asm/mach/time.h> #include <asm/exception.h> @@ -22,7 +24,7 @@ extern void sirfsoc_cpu_die(unsigned int cpu); extern void __init sirfsoc_of_irq_init(void); extern void __init sirfsoc_of_clk_init(void); -extern void sirfsoc_restart(char, const char *); +extern void sirfsoc_restart(enum reboot_mode, const char *); extern asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs); #ifndef CONFIG_DEBUG_LL diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c index 
d5e0cbc934c0..ccb53391147a 100644 --- a/arch/arm/mach-prima2/rstc.c +++ b/arch/arm/mach-prima2/rstc.c @@ -13,6 +13,7 @@ #include <linux/device.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/reboot.h> void __iomem *sirfsoc_rstc_base; static DEFINE_MUTEX(rstc_lock); @@ -84,7 +85,7 @@ int sirfsoc_reset_device(struct device *dev) #define SIRFSOC_SYS_RST_BIT BIT(31) -void sirfsoc_restart(char mode, const char *cmd) +void sirfsoc_restart(enum reboot_mode mode, const char *cmd) { writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base); } diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 96100dbf5a2e..a8427115ee07 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -615,12 +615,14 @@ endmenu config PXA25x bool select CPU_XSCALE + select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA21x/25x/26x variants config PXA27x bool select CPU_XSCALE + select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA27x variants @@ -633,6 +635,7 @@ config CPU_PXA26x config PXA3xx bool select CPU_XSC3 + select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA3xx variants diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index a5b8fead7d61..f162f1b77cd2 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -663,16 +663,16 @@ static void corgi_poweroff(void) /* Green LED off tells the bootloader to halt */ gpio_set_value(CORGI_GPIO_LED_GREEN, 0); - pxa_restart('h', NULL); + pxa_restart(REBOOT_HARD, NULL); } -static void corgi_restart(char mode, const char *cmd) +static void corgi_restart(enum reboot_mode mode, const char *cmd) { if (!machine_is_corgi()) /* Green LED on tells the bootloader to reboot */ gpio_set_value(CORGI_GPIO_LED_GREEN, 1); - pxa_restart('h', cmd); + pxa_restart(REBOOT_HARD, cmd); } static void __init corgi_init(void) diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 446563a7d1ad..f6726bb4eb95 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -833,21 +833,25 @@ static inline void em_x270_init_ac97(void) {} #endif #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int em_x270_module_matrix_keys[] = { +static const unsigned int em_x270_module_matrix_keys[] = { KEY(0, 0, KEY_A), KEY(1, 0, KEY_UP), KEY(2, 1, KEY_B), KEY(0, 2, KEY_LEFT), KEY(1, 1, KEY_ENTER), KEY(2, 0, KEY_RIGHT), KEY(0, 1, KEY_C), KEY(1, 2, KEY_DOWN), KEY(2, 2, KEY_D), }; +static struct matrix_keymap_data em_x270_matrix_keymap_data = { + .keymap = em_x270_module_matrix_keys, + .keymap_size = ARRAY_SIZE(em_x270_module_matrix_keys), +}; + struct pxa27x_keypad_platform_data em_x270_module_keypad_info = { /* code map for the matrix keys */ .matrix_key_rows = 3, .matrix_key_cols = 3, - .matrix_key_map = em_x270_module_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(em_x270_module_matrix_keys), + .matrix_keymap_data = &em_x270_matrix_keymap_data, }; -static unsigned int em_x270_exeda_matrix_keys[] = { +static const unsigned int em_x270_exeda_matrix_keys[] = { KEY(0, 0, KEY_RIGHTSHIFT), KEY(0, 1, KEY_RIGHTCTRL), KEY(0, 2, KEY_RIGHTALT), KEY(0, 3, KEY_SPACE), KEY(0, 4, KEY_LEFTALT), KEY(0, 5, KEY_LEFTCTRL), @@ -889,12 +893,16 @@ static unsigned int em_x270_exeda_matrix_keys[] = { KEY(7, 6, 0), KEY(7, 7, 0), }; +static struct matrix_keymap_data em_x270_exeda_matrix_keymap_data = { + .keymap = em_x270_exeda_matrix_keys, + .keymap_size = ARRAY_SIZE(em_x270_exeda_matrix_keys), +}; + struct pxa27x_keypad_platform_data 
em_x270_exeda_keypad_info = { /* code map for the matrix keys */ .matrix_key_rows = 8, .matrix_key_cols = 8, - .matrix_key_map = em_x270_exeda_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(em_x270_exeda_matrix_keys), + .matrix_keymap_data = &em_x270_exeda_matrix_keymap_data, }; static void __init em_x270_init_keypad(void) diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c index dca10709be8f..fe2eb8394dff 100644 --- a/arch/arm/mach-pxa/ezx.c +++ b/arch/arm/mach-pxa/ezx.c @@ -392,7 +392,7 @@ static unsigned long e6_pin_config[] __initdata = { /* KEYPAD */ #ifdef CONFIG_MACH_EZX_A780 -static unsigned int a780_key_map[] = { +static const unsigned int a780_key_map[] = { KEY(0, 0, KEY_SEND), KEY(0, 1, KEY_BACK), KEY(0, 2, KEY_END), @@ -424,11 +424,15 @@ static unsigned int a780_key_map[] = { KEY(4, 4, KEY_DOWN), }; +static struct matrix_keymap_data a780_matrix_keymap_data = { + .keymap = a780_key_map, + .keymap_size = ARRAY_SIZE(a780_key_map), +}; + static struct pxa27x_keypad_platform_data a780_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 5, - .matrix_key_map = a780_key_map, - .matrix_key_map_size = ARRAY_SIZE(a780_key_map), + .matrix_keymap_data = &a780_matrix_keymap_data, .direct_key_map = { KEY_CAMERA }, .direct_key_num = 1, @@ -438,7 +442,7 @@ static struct pxa27x_keypad_platform_data a780_keypad_platform_data = { #endif /* CONFIG_MACH_EZX_A780 */ #ifdef CONFIG_MACH_EZX_E680 -static unsigned int e680_key_map[] = { +static const unsigned int e680_key_map[] = { KEY(0, 0, KEY_UP), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_RESERVED), @@ -455,11 +459,15 @@ static unsigned int e680_key_map[] = { KEY(2, 3, KEY_KPENTER), }; +static struct matrix_keymap_data e680_matrix_keymap_data = { + .keymap = e680_key_map, + .keymap_size = ARRAY_SIZE(e680_key_map), +}; + static struct pxa27x_keypad_platform_data e680_keypad_platform_data = { .matrix_key_rows = 3, .matrix_key_cols = 4, - .matrix_key_map = e680_key_map, - .matrix_key_map_size = ARRAY_SIZE(e680_key_map), + .matrix_keymap_data = &e680_matrix_keymap_data, .direct_key_map = { KEY_CAMERA, @@ -476,7 +484,7 @@ static struct pxa27x_keypad_platform_data e680_keypad_platform_data = { #endif /* CONFIG_MACH_EZX_E680 */ #ifdef CONFIG_MACH_EZX_A1200 -static unsigned int a1200_key_map[] = { +static const unsigned int a1200_key_map[] = { KEY(0, 0, KEY_RESERVED), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_PAGEDOWN), @@ -513,18 +521,22 @@ static unsigned int a1200_key_map[] = { KEY(4, 5, KEY_RESERVED), }; +static struct matrix_keymap_data a1200_matrix_keymap_data = { + .keymap = a1200_key_map, + .keymap_size = ARRAY_SIZE(a1200_key_map), +}; + static struct pxa27x_keypad_platform_data a1200_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 6, - .matrix_key_map = a1200_key_map, - .matrix_key_map_size = ARRAY_SIZE(a1200_key_map), + .matrix_keymap_data = &a1200_matrix_keymap_data, .debounce_interval = 30, }; #endif /* CONFIG_MACH_EZX_A1200 */ #ifdef CONFIG_MACH_EZX_E6 -static unsigned int e6_key_map[] = { +static const unsigned int e6_key_map[] = { KEY(0, 0, KEY_RESERVED), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_PAGEDOWN), @@ -561,18 +573,22 @@ static unsigned int e6_key_map[] = { KEY(4, 5, KEY_PREVIOUSSONG), }; +static struct matrix_keymap_data e6_keymap_data = { + .keymap = e6_key_map, + .keymap_size = ARRAY_SIZE(e6_key_map), +}; + static struct pxa27x_keypad_platform_data e6_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 6, - .matrix_key_map = e6_key_map, - .matrix_key_map_size = ARRAY_SIZE(e6_key_map), + 
.matrix_keymap_data = &e6_keymap_data, .debounce_interval = 30, }; #endif /* CONFIG_MACH_EZX_E6 */ #ifdef CONFIG_MACH_EZX_A910 -static unsigned int a910_key_map[] = { +static const unsigned int a910_key_map[] = { KEY(0, 0, KEY_NUMERIC_6), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_PAGEDOWN), @@ -609,18 +625,22 @@ static unsigned int a910_key_map[] = { KEY(4, 5, KEY_RESERVED), }; +static struct matrix_keymap_data a910_matrix_keymap_data = { + .keymap = a910_key_map, + .keymap_size = ARRAY_SIZE(a910_key_map), +}; + static struct pxa27x_keypad_platform_data a910_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 6, - .matrix_key_map = a910_key_map, - .matrix_key_map_size = ARRAY_SIZE(a910_key_map), + .matrix_keymap_data = &a910_matrix_keymap_data, .debounce_interval = 30, }; #endif /* CONFIG_MACH_EZX_A910 */ #ifdef CONFIG_MACH_EZX_E2 -static unsigned int e2_key_map[] = { +static const unsigned int e2_key_map[] = { KEY(0, 0, KEY_NUMERIC_6), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_NUMERIC_9), @@ -657,11 +677,15 @@ static unsigned int e2_key_map[] = { KEY(4, 5, KEY_RESERVED), }; +static struct matrix_keymap_data e2_matrix_keymap_data = { + .keymap = e2_key_map, + .keymap_size = ARRAY_SIZE(e2_key_map), +}; + static struct pxa27x_keypad_platform_data e2_keypad_platform_data = { .matrix_key_rows = 5, .matrix_key_cols = 6, - .matrix_key_map = e2_key_map, - .matrix_key_map_size = ARRAY_SIZE(e2_key_map), + .matrix_keymap_data = &e2_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h index fd7ea39b78c0..8963984d1f43 100644 --- a/arch/arm/mach-pxa/generic.h +++ b/arch/arm/mach-pxa/generic.h @@ -9,6 +9,8 @@ * published by the Free Software Foundation. */ +#include <linux/reboot.h> + struct irq_data; extern void pxa_timer_init(void); @@ -56,4 +58,4 @@ void __init pxa_set_btuart_info(void *info); void __init pxa_set_stuart_info(void *info); void __init pxa_set_hwuart_info(void *info); -void pxa_restart(char, const char *); +void pxa_restart(enum reboot_mode, const char *); diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c index e848c4607baf..5d665588c7eb 100644 --- a/arch/arm/mach-pxa/littleton.c +++ b/arch/arm/mach-pxa/littleton.c @@ -222,7 +222,7 @@ static inline void littleton_init_spi(void) {} #endif #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int littleton_matrix_key_map[] = { +static const unsigned int littleton_matrix_key_map[] = { /* KEY(row, col, key_code) */ KEY(1, 3, KEY_0), KEY(0, 0, KEY_1), KEY(1, 0, KEY_2), KEY(2, 0, KEY_3), KEY(0, 1, KEY_4), KEY(1, 1, KEY_5), KEY(2, 1, KEY_6), KEY(0, 2, KEY_7), @@ -249,11 +249,15 @@ static unsigned int littleton_matrix_key_map[] = { KEY(3, 1, KEY_F23), /* soft2 */ }; +static struct matrix_keymap_data littleton_matrix_keymap_data = { + .keymap = littleton_matrix_key_map, + .keymap_size = ARRAY_SIZE(littleton_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data littleton_keypad_info = { .matrix_key_rows = 6, .matrix_key_cols = 5, - .matrix_key_map = littleton_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(littleton_matrix_key_map), + .matrix_keymap_data = &littleton_matrix_keymap_data, .enable_rotary0 = 1, .rotary0_up_key = KEY_UP, diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 7a12c1ba90ff..d2c652318376 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c @@ -498,7 +498,7 @@ static struct pxaohci_platform_data mainstone_ohci_platform_data 
= { }; #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int mainstone_matrix_keys[] = { +static const unsigned int mainstone_matrix_keys[] = { KEY(0, 0, KEY_A), KEY(1, 0, KEY_B), KEY(2, 0, KEY_C), KEY(3, 0, KEY_D), KEY(4, 0, KEY_E), KEY(5, 0, KEY_F), KEY(0, 1, KEY_G), KEY(1, 1, KEY_H), KEY(2, 1, KEY_I), @@ -527,11 +527,15 @@ static unsigned int mainstone_matrix_keys[] = { KEY(4, 6, KEY_SELECT), }; +static struct matrix_keymap_data mainstone_matrix_keymap_data = { + .keymap = mainstone_matrix_keys, + .keymap_size = ARRAY_SIZE(mainstone_matrix_keys), +}; + struct pxa27x_keypad_platform_data mainstone_keypad_info = { .matrix_key_rows = 6, .matrix_key_cols = 7, - .matrix_key_map = mainstone_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(mainstone_matrix_keys), + .matrix_keymap_data = &mainstone_matrix_keymap_data, .enable_rotary0 = 1, .rotary0_up_key = KEY_UP, diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index f8979b943cbf..acc9d3cc0762 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c @@ -37,6 +37,7 @@ #include <linux/wm97xx.h> #include <linux/mtd/physmap.h> #include <linux/usb/gpio_vbus.h> +#include <linux/reboot.h> #include <linux/regulator/max1586.h> #include <linux/slab.h> #include <linux/i2c/pxa-i2c.h> @@ -222,7 +223,7 @@ static struct pxafb_mach_info mioa701_pxafb_info = { /* * Keyboard configuration */ -static unsigned int mioa701_matrix_keys[] = { +static const unsigned int mioa701_matrix_keys[] = { KEY(0, 0, KEY_UP), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_MEDIA), @@ -233,11 +234,16 @@ static unsigned int mioa701_matrix_keys[] = { KEY(2, 1, KEY_PHONE), /* Phone Green key */ KEY(2, 2, KEY_CAMERA) /* Camera key */ }; + +static struct matrix_keymap_data mioa701_matrix_keymap_data = { + .keymap = mioa701_matrix_keys, + .keymap_size = ARRAY_SIZE(mioa701_matrix_keys), +}; + static struct pxa27x_keypad_platform_data mioa701_keypad_info = { .matrix_key_rows = 3, .matrix_key_cols = 3, - .matrix_key_map = mioa701_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(mioa701_matrix_keys), + .matrix_keymap_data = &mioa701_matrix_keymap_data, }; /* @@ -691,13 +697,13 @@ static void mioa701_machine_exit(void); static void mioa701_poweroff(void) { mioa701_machine_exit(); - pxa_restart('s', NULL); + pxa_restart(REBOOT_SOFT, NULL); } -static void mioa701_restart(char c, const char *cmd) +static void mioa701_restart(enum reboot_mode c, const char *cmd) { mioa701_machine_exit(); - pxa_restart('s', cmd); + pxa_restart(REBOOT_SOFT, cmd); } static struct gpio global_gpios[] = { @@ -756,7 +762,6 @@ static void mioa701_machine_exit(void) MACHINE_START(MIOA701, "MIO A701") .atag_offset = 0x100, - .restart_mode = 's', .map_io = &pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = &pxa27x_init_irq, diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c index 909b713e5789..cf210b11ffcc 100644 --- a/arch/arm/mach-pxa/palmld.c +++ b/arch/arm/mach-pxa/palmld.c @@ -173,7 +173,7 @@ static inline void palmld_nor_init(void) {} * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int palmld_matrix_keys[] = { +static const unsigned int palmld_matrix_keys[] = { KEY(0, 1, KEY_F2), KEY(0, 2, KEY_UP), @@ -190,11 +190,15 @@ static unsigned int palmld_matrix_keys[] = { KEY(3, 2, KEY_LEFT), }; +static struct matrix_keymap_data palmld_matrix_keymap_data = { + .keymap = palmld_matrix_keys, + 
.keymap_size = ARRAY_SIZE(palmld_matrix_keys), +}; + static struct pxa27x_keypad_platform_data palmld_keypad_platform_data = { .matrix_key_rows = 4, .matrix_key_cols = 3, - .matrix_key_map = palmld_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(palmld_matrix_keys), + .matrix_keymap_data = &palmld_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c index 5033fd07968f..3ed9b029428b 100644 --- a/arch/arm/mach-pxa/palmt5.c +++ b/arch/arm/mach-pxa/palmt5.c @@ -108,7 +108,7 @@ static unsigned long palmt5_pin_config[] __initdata = { * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int palmt5_matrix_keys[] = { +static const unsigned int palmt5_matrix_keys[] = { KEY(0, 0, KEY_POWER), KEY(0, 1, KEY_F1), KEY(0, 2, KEY_ENTER), @@ -124,11 +124,15 @@ static unsigned int palmt5_matrix_keys[] = { KEY(3, 2, KEY_LEFT), }; +static struct matrix_keymap_data palmt5_matrix_keymap_data = { + .keymap = palmt5_matrix_keys, + .keymap_size = ARRAY_SIZE(palmt5_matrix_keys), +}; + static struct pxa27x_keypad_platform_data palmt5_keypad_platform_data = { .matrix_key_rows = 4, .matrix_key_cols = 3, - .matrix_key_map = palmt5_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(palmt5_matrix_keys), + .matrix_keymap_data = &palmt5_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c index d82a50b4a803..d8b937c870de 100644 --- a/arch/arm/mach-pxa/palmtreo.c +++ b/arch/arm/mach-pxa/palmtreo.c @@ -168,7 +168,7 @@ static unsigned long centro685_pin_config[] __initdata = { * GPIO keyboard ******************************************************************************/ #if IS_ENABLED(CONFIG_KEYBOARD_PXA27x) -static unsigned int treo680_matrix_keys[] = { +static const unsigned int treo680_matrix_keys[] = { KEY(0, 0, KEY_F8), /* Red/Off/Power */ KEY(0, 1, KEY_LEFT), KEY(0, 2, KEY_LEFTCTRL), /* Alternate */ @@ -227,7 +227,7 @@ static unsigned int treo680_matrix_keys[] = { KEY(7, 5, KEY_I), }; -static unsigned int centro_matrix_keys[] = { +static const unsigned int centro_matrix_keys[] = { KEY(0, 0, KEY_F9), /* Home */ KEY(0, 1, KEY_LEFT), KEY(0, 2, KEY_LEFTCTRL), /* Alternate */ @@ -286,11 +286,20 @@ static unsigned int centro_matrix_keys[] = { KEY(7, 5, KEY_I), }; +static struct matrix_keymap_data treo680_matrix_keymap_data = { + .keymap = treo680_matrix_keys, + .keymap_size = ARRAY_SIZE(treo680_matrix_keys), +}; + +static struct matrix_keymap_data centro_matrix_keymap_data = { + .keymap = centro_matrix_keys, + .keymap_size = ARRAY_SIZE(centro_matrix_keys), +}; + static struct pxa27x_keypad_platform_data treo680_keypad_pdata = { .matrix_key_rows = 8, .matrix_key_cols = 7, - .matrix_key_map = treo680_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(treo680_matrix_keys), + .matrix_keymap_data = &treo680_matrix_keymap_data, .direct_key_map = { KEY_CONNECT }, .direct_key_num = 1, @@ -301,10 +310,8 @@ static void __init palmtreo_kpc_init(void) { static struct pxa27x_keypad_platform_data *data = &treo680_keypad_pdata; - if (machine_is_centro()) { - data->matrix_key_map = centro_matrix_keys; - data->matrix_key_map_size = ARRAY_SIZE(centro_matrix_keys); - } + if (machine_is_centro()) + data->matrix_keymap_data = &centro_matrix_keymap_data; pxa_set_keypad_info(&treo680_keypad_pdata); } diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c index
627c93a7364c..83f830dd8ad8 100644 --- a/arch/arm/mach-pxa/palmtx.c +++ b/arch/arm/mach-pxa/palmtx.c @@ -176,7 +176,7 @@ static inline void palmtx_nor_init(void) {} * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int palmtx_matrix_keys[] = { +static const unsigned int palmtx_matrix_keys[] = { KEY(0, 0, KEY_POWER), KEY(0, 1, KEY_F1), KEY(0, 2, KEY_ENTER), @@ -192,11 +192,15 @@ static unsigned int palmtx_matrix_keys[] = { KEY(3, 2, KEY_LEFT), }; +static struct matrix_keymap_data palmtx_matrix_keymap_data = { + .keymap = palmtx_matrix_keys, + .keymap_size = ARRAY_SIZE(palmtx_matrix_keys), +}; + static struct pxa27x_keypad_platform_data palmtx_keypad_platform_data = { .matrix_key_rows = 4, .matrix_key_cols = 3, - .matrix_key_map = palmtx_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(palmtx_matrix_keys), + .matrix_keymap_data = &palmtx_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 18b7fcd98592..1a35ddf218da 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c @@ -140,7 +140,7 @@ static unsigned long palmz72_pin_config[] __initdata = { * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int palmz72_matrix_keys[] = { +static const unsigned int palmz72_matrix_keys[] = { KEY(0, 0, KEY_POWER), KEY(0, 1, KEY_F1), KEY(0, 2, KEY_ENTER), @@ -156,11 +156,15 @@ static unsigned int palmz72_matrix_keys[] = { KEY(3, 2, KEY_LEFT), }; +static struct matrix_keymap_data almz72_matrix_keymap_data = { + .keymap = palmz72_matrix_keys, + .keymap_size = ARRAY_SIZE(palmz72_matrix_keys), +}; + static struct pxa27x_keypad_platform_data palmz72_keypad_platform_data = { .matrix_key_rows = 4, .matrix_key_cols = 3, - .matrix_key_map = palmz72_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(palmz72_matrix_keys), + .matrix_keymap_data = &almz72_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index 50ccd5f1d560..711d37e26bd8 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c @@ -422,7 +422,7 @@ static struct i2c_board_info __initdata poodle_i2c_devices[] = { static void poodle_poweroff(void) { - pxa_restart('h', NULL); + pxa_restart(REBOOT_HARD, NULL); } static void __init poodle_init(void) diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index 3fab583755d4..0d5dd646f61f 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c @@ -83,7 +83,7 @@ static void do_hw_reset(void) writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); } -void pxa_restart(char mode, const char *cmd) +void pxa_restart(enum reboot_mode mode, const char *cmd) { local_irq_disable(); local_fiq_disable(); @@ -91,14 +91,14 @@ void pxa_restart(char mode, const char *cmd) clear_reset_status(RESET_STATUS_ALL); switch (mode) { - case 's': + case REBOOT_SOFT: /* Jump into ROM at address 0 */ soft_restart(0); break; - case 'g': + case REBOOT_GPIO: do_gpio_reset(); break; - case 'h': + case REBOOT_HARD: default: do_hw_reset(); break; diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 362726c49c70..2125df0444e7 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -31,6 +31,7 @@ #include <linux/regulator/machine.h> #include 
<linux/io.h> #include <linux/module.h> +#include <linux/reboot.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -924,10 +925,10 @@ static inline void spitz_i2c_init(void) {} ******************************************************************************/ static void spitz_poweroff(void) { - pxa_restart('g', NULL); + pxa_restart(REBOOT_GPIO, NULL); } -static void spitz_restart(char mode, const char *cmd) +static void spitz_restart(enum reboot_mode mode, const char *cmd) { uint32_t msc0 = __raw_readl(MSC0); /* Bootloader magic for a reboot */ @@ -979,7 +980,6 @@ static void __init spitz_fixup(struct tag *tags, char **cmdline, #ifdef CONFIG_MACH_SPITZ MACHINE_START(SPITZ, "SHARP Spitz") - .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, @@ -993,7 +993,6 @@ MACHINE_END #ifdef CONFIG_MACH_BORZOI MACHINE_START(BORZOI, "SHARP Borzoi") - .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, @@ -1007,7 +1006,6 @@ MACHINE_END #ifdef CONFIG_MACH_AKITA MACHINE_START(AKITA, "SHARP Akita") - .restart_mode = 'g', .fixup = spitz_fixup, .map_io = pxa27x_map_io, .nr_irqs = PXA_NR_IRQS, diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c index f55979c09a5f..4680efe55345 100644 --- a/arch/arm/mach-pxa/tavorevb.c +++ b/arch/arm/mach-pxa/tavorevb.c @@ -106,7 +106,7 @@ static struct platform_device smc91x_device = { }; #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int tavorevb_matrix_key_map[] = { +static const unsigned int tavorevb_matrix_key_map[] = { /* KEY(row, col, key_code) */ KEY(0, 4, KEY_A), KEY(0, 5, KEY_B), KEY(0, 6, KEY_C), KEY(1, 4, KEY_E), KEY(1, 5, KEY_F), KEY(1, 6, KEY_G), @@ -147,11 +147,15 @@ static unsigned int tavorevb_matrix_key_map[] = { KEY(3, 3, KEY_F23), /* soft2 */ }; +static struct matrix_keymap_data tavorevb_matrix_keymap_data = { + .keymap = tavorevb_matrix_key_map, + .keymap_size = ARRAY_SIZE(tavorevb_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data tavorevb_keypad_info = { .matrix_key_rows = 7, .matrix_key_cols = 7, - .matrix_key_map = tavorevb_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(tavorevb_matrix_key_map), + .matrix_keymap_data = &tavorevb_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 8f1ee92aea30..9aa852a8fab9 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c @@ -16,11 +16,11 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/clockchips.h> +#include <linux/sched_clock.h> #include <asm/div64.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> -#include <asm/sched_clock.h> #include <mach/regs-ost.h> #include <mach/irqs.h> diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 3d91d2e5bf3a..0206b915a6f6 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -36,6 +36,7 @@ #include <linux/input/matrix_keypad.h> #include <linux/i2c/pxa-i2c.h> #include <linux/usb/gpio_vbus.h> +#include <linux/reboot.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -911,10 +912,10 @@ static struct platform_device *devices[] __initdata = { static void tosa_poweroff(void) { - pxa_restart('g', NULL); + pxa_restart(REBOOT_GPIO, NULL); } -static void tosa_restart(char mode, const char *cmd) +static void tosa_restart(enum reboot_mode mode, const char *cmd) { uint32_t msc0 = __raw_readl(MSC0); @@ -969,7 +970,6 @@ static void __init fixup_tosa(struct tag *tags, char 
**cmdline, } MACHINE_START(TOSA, "SHARP Tosa") - .restart_mode = 'g', .fixup = fixup_tosa, .map_io = pxa25x_map_io, .nr_irqs = TOSA_NR_IRQS, diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index 989903a7e467..2513d8f4931f 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -345,7 +345,7 @@ static inline void z2_leds_init(void) {} * GPIO keyboard ******************************************************************************/ #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int z2_matrix_keys[] = { +static const unsigned int z2_matrix_keys[] = { KEY(0, 0, KEY_OPTION), KEY(1, 0, KEY_UP), KEY(2, 0, KEY_DOWN), @@ -405,11 +405,15 @@ static unsigned int z2_matrix_keys[] = { KEY(5, 7, KEY_DOT), }; +static struct matrix_keymap_data z2_matrix_keymap_data = { + .keymap = z2_matrix_keys, + .keymap_size = ARRAY_SIZE(z2_matrix_keys), +}; + static struct pxa27x_keypad_platform_data z2_keypad_platform_data = { .matrix_key_rows = 7, .matrix_key_cols = 8, - .matrix_key_map = z2_matrix_keys, - .matrix_key_map_size = ARRAY_SIZE(z2_matrix_keys), + .matrix_keymap_data = &z2_matrix_keymap_data, .debounce_interval = 30, }; diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c index 1f00d650ac27..36cf7cf95ec1 100644 --- a/arch/arm/mach-pxa/zylonite.c +++ b/arch/arm/mach-pxa/zylonite.c @@ -263,7 +263,7 @@ static inline void zylonite_init_mmc(void) {} #endif #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) -static unsigned int zylonite_matrix_key_map[] = { +static const unsigned int zylonite_matrix_key_map[] = { /* KEY(row, col, key_code) */ KEY(0, 0, KEY_A), KEY(0, 1, KEY_B), KEY(0, 2, KEY_C), KEY(0, 5, KEY_D), KEY(1, 0, KEY_E), KEY(1, 1, KEY_F), KEY(1, 2, KEY_G), KEY(1, 5, KEY_H), @@ -306,11 +306,15 @@ static unsigned int zylonite_matrix_key_map[] = { KEY(0, 3, KEY_AUX), /* contact */ }; +static struct matrix_keymap_data zylonite_matrix_keymap_data = { + .keymap = zylonite_matrix_key_map, + .keymap_size = ARRAY_SIZE(zylonite_matrix_key_map), +}; + static struct pxa27x_keypad_platform_data zylonite_keypad_info = { .matrix_key_rows = 8, .matrix_key_cols = 8, - .matrix_key_map = zylonite_matrix_key_map, - .matrix_key_map_size = ARRAY_SIZE(zylonite_matrix_key_map), + .matrix_keymap_data = &zylonite_matrix_keymap_data, .enable_rotary0 = 1, .rotary0_up_key = KEY_UP, diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 5b1c8bfe6fa9..c85ddb2a0ad0 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c @@ -29,6 +29,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <asm/irq.h> @@ -418,7 +419,7 @@ static void __init realview_eb_timer_init(void) realview_eb_twd_init(); } -static void realview_eb_restart(char mode, const char *cmd) +static void realview_eb_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index d5e83a1f6982..c5eade76461b 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c @@ -31,6 +31,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include 
<mach/hardware.h> #include <asm/irq.h> @@ -329,7 +330,7 @@ static void __init realview_pb1176_timer_init(void) realview_timer_init(IRQ_DC1176_TIMER0); } -static void realview_pb1176_restart(char mode, const char *cmd) +static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c index c3cfe213b5e6..f4b0962578fe 100644 --- a/arch/arm/mach-realview/realview_pb11mp.c +++ b/arch/arm/mach-realview/realview_pb11mp.c @@ -29,6 +29,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <mach/hardware.h> #include <asm/irq.h> @@ -316,7 +317,7 @@ static void __init realview_pb11mp_timer_init(void) realview_pb11mp_twd_init(); } -static void realview_pb11mp_restart(char mode, const char *cmd) +static void realview_pb11mp_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c index dde652a59620..10a3e1d76891 100644 --- a/arch/arm/mach-realview/realview_pba8.c +++ b/arch/arm/mach-realview/realview_pba8.c @@ -29,6 +29,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/mach-types.h> @@ -264,7 +265,7 @@ static void __init realview_pba8_timer_init(void) realview_timer_init(IRQ_PBA8_TIMER0_1); } -static void realview_pba8_restart(char mode, const char *cmd) +static void realview_pba8_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index 54f0185b01e3..9d75493e3f0c 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c @@ -28,6 +28,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> #include <linux/platform_data/clk-realview.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/mach-types.h> @@ -344,7 +345,7 @@ static void realview_pbx_fixup(struct tag *tags, char **from, #endif } -static void realview_pbx_restart(char mode, const char *cmd) +static void realview_pbx_restart(enum reboot_mode mode, const char *cmd) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c index a302cf5e0fc7..09d602b10d57 100644 --- a/arch/arm/mach-rpc/riscpc.c +++ b/arch/arm/mach-rpc/riscpc.c @@ -20,6 +20,7 @@ #include <linux/ata_platform.h> #include <linux/io.h> #include <linux/i2c.h> +#include <linux/reboot.h> #include <asm/elf.h> #include <asm/mach-types.h> @@ -201,7 +202,7 @@ static int __init rpc_init(void) arch_initcall(rpc_init); -static void rpc_restart(char mode, const char *cmd) +static void rpc_restart(enum reboot_mode mode, const char *cmd) { iomd_writeb(0, IOMD_ROMCR0); diff --git a/arch/arm/mach-s3c24xx/common.h b/arch/arm/mach-s3c24xx/common.h index 307c3714be55..84b280654f4c 100644 --- a/arch/arm/mach-s3c24xx/common.h +++ b/arch/arm/mach-s3c24xx/common.h @@ -12,6 
+12,8 @@ #ifndef __ARCH_ARM_MACH_S3C24XX_COMMON_H #define __ARCH_ARM_MACH_S3C24XX_COMMON_H __FILE__ +#include <linux/reboot.h> + struct s3c2410_uartcfg; #ifdef CONFIG_CPU_S3C2410 @@ -20,7 +22,7 @@ extern int s3c2410a_init(void); extern void s3c2410_map_io(void); extern void s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2410_init_clocks(int xtal); -extern void s3c2410_restart(char mode, const char *cmd); +extern void s3c2410_restart(enum reboot_mode mode, const char *cmd); extern void s3c2410_init_irq(void); #else #define s3c2410_init_clocks NULL @@ -36,7 +38,7 @@ extern void s3c2412_map_io(void); extern void s3c2412_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2412_init_clocks(int xtal); extern int s3c2412_baseclk_add(void); -extern void s3c2412_restart(char mode, const char *cmd); +extern void s3c2412_restart(enum reboot_mode mode, const char *cmd); extern void s3c2412_init_irq(void); #else #define s3c2412_init_clocks NULL @@ -51,7 +53,7 @@ extern void s3c2416_map_io(void); extern void s3c2416_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2416_init_clocks(int xtal); extern int s3c2416_baseclk_add(void); -extern void s3c2416_restart(char mode, const char *cmd); +extern void s3c2416_restart(enum reboot_mode mode, const char *cmd); extern void s3c2416_init_irq(void); extern struct syscore_ops s3c2416_irq_syscore_ops; @@ -66,7 +68,7 @@ extern struct syscore_ops s3c2416_irq_syscore_ops; extern void s3c244x_map_io(void); extern void s3c244x_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c244x_init_clocks(int xtal); -extern void s3c244x_restart(char mode, const char *cmd); +extern void s3c244x_restart(enum reboot_mode mode, const char *cmd); #else #define s3c244x_init_clocks NULL #define s3c244x_init_uarts NULL @@ -96,7 +98,7 @@ extern void s3c2443_map_io(void); extern void s3c2443_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2443_init_clocks(int xtal); extern int s3c2443_baseclk_add(void); -extern void s3c2443_restart(char mode, const char *cmd); +extern void s3c2443_restart(enum reboot_mode mode, const char *cmd); extern void s3c2443_init_irq(void); #else #define s3c2443_init_clocks NULL diff --git a/arch/arm/mach-s3c24xx/cpufreq-utils.c b/arch/arm/mach-s3c24xx/cpufreq-utils.c index ddd8280e3875..2a0aa5684e72 100644 --- a/arch/arm/mach-s3c24xx/cpufreq-utils.c +++ b/arch/arm/mach-s3c24xx/cpufreq-utils.c @@ -60,5 +60,5 @@ void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg) */ void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg) { - __raw_writel(cfg->pll.index, S3C2410_MPLLCON); + __raw_writel(cfg->pll.driver_data, S3C2410_MPLLCON); } diff --git a/arch/arm/mach-s3c24xx/pll-s3c2410.c b/arch/arm/mach-s3c24xx/pll-s3c2410.c index dcf3420a3271..5e37d368594b 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2410.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2410.c @@ -33,36 +33,36 @@ #include <plat/cpu-freq-core.h> static struct cpufreq_frequency_table pll_vals_12MHz[] = { - { .frequency = 34000000, .index = PLLVAL(82, 2, 3), }, - { .frequency = 45000000, .index = PLLVAL(82, 1, 3), }, - { .frequency = 51000000, .index = PLLVAL(161, 3, 3), }, - { .frequency = 48000000, .index = PLLVAL(120, 2, 3), }, - { .frequency = 56000000, .index = PLLVAL(142, 2, 3), }, - { .frequency = 68000000, .index = PLLVAL(82, 2, 2), }, - { .frequency = 79000000, .index = PLLVAL(71, 1, 2), }, - { .frequency = 85000000, .index = PLLVAL(105, 2, 2), }, - { .frequency = 90000000, .index = PLLVAL(112, 2, 2), }, - { .frequency = 101000000, 
.index = PLLVAL(127, 2, 2), }, - { .frequency = 113000000, .index = PLLVAL(105, 1, 2), }, - { .frequency = 118000000, .index = PLLVAL(150, 2, 2), }, - { .frequency = 124000000, .index = PLLVAL(116, 1, 2), }, - { .frequency = 135000000, .index = PLLVAL(82, 2, 1), }, - { .frequency = 147000000, .index = PLLVAL(90, 2, 1), }, - { .frequency = 152000000, .index = PLLVAL(68, 1, 1), }, - { .frequency = 158000000, .index = PLLVAL(71, 1, 1), }, - { .frequency = 170000000, .index = PLLVAL(77, 1, 1), }, - { .frequency = 180000000, .index = PLLVAL(82, 1, 1), }, - { .frequency = 186000000, .index = PLLVAL(85, 1, 1), }, - { .frequency = 192000000, .index = PLLVAL(88, 1, 1), }, - { .frequency = 203000000, .index = PLLVAL(161, 3, 1), }, + { .frequency = 34000000, .driver_data = PLLVAL(82, 2, 3), }, + { .frequency = 45000000, .driver_data = PLLVAL(82, 1, 3), }, + { .frequency = 51000000, .driver_data = PLLVAL(161, 3, 3), }, + { .frequency = 48000000, .driver_data = PLLVAL(120, 2, 3), }, + { .frequency = 56000000, .driver_data = PLLVAL(142, 2, 3), }, + { .frequency = 68000000, .driver_data = PLLVAL(82, 2, 2), }, + { .frequency = 79000000, .driver_data = PLLVAL(71, 1, 2), }, + { .frequency = 85000000, .driver_data = PLLVAL(105, 2, 2), }, + { .frequency = 90000000, .driver_data = PLLVAL(112, 2, 2), }, + { .frequency = 101000000, .driver_data = PLLVAL(127, 2, 2), }, + { .frequency = 113000000, .driver_data = PLLVAL(105, 1, 2), }, + { .frequency = 118000000, .driver_data = PLLVAL(150, 2, 2), }, + { .frequency = 124000000, .driver_data = PLLVAL(116, 1, 2), }, + { .frequency = 135000000, .driver_data = PLLVAL(82, 2, 1), }, + { .frequency = 147000000, .driver_data = PLLVAL(90, 2, 1), }, + { .frequency = 152000000, .driver_data = PLLVAL(68, 1, 1), }, + { .frequency = 158000000, .driver_data = PLLVAL(71, 1, 1), }, + { .frequency = 170000000, .driver_data = PLLVAL(77, 1, 1), }, + { .frequency = 180000000, .driver_data = PLLVAL(82, 1, 1), }, + { .frequency = 186000000, .driver_data = PLLVAL(85, 1, 1), }, + { .frequency = 192000000, .driver_data = PLLVAL(88, 1, 1), }, + { .frequency = 203000000, .driver_data = PLLVAL(161, 3, 1), }, /* 2410A extras */ - { .frequency = 210000000, .index = PLLVAL(132, 2, 1), }, - { .frequency = 226000000, .index = PLLVAL(105, 1, 1), }, - { .frequency = 266000000, .index = PLLVAL(125, 1, 1), }, - { .frequency = 268000000, .index = PLLVAL(126, 1, 1), }, - { .frequency = 270000000, .index = PLLVAL(127, 1, 1), }, + { .frequency = 210000000, .driver_data = PLLVAL(132, 2, 1), }, + { .frequency = 226000000, .driver_data = PLLVAL(105, 1, 1), }, + { .frequency = 266000000, .driver_data = PLLVAL(125, 1, 1), }, + { .frequency = 268000000, .driver_data = PLLVAL(126, 1, 1), }, + { .frequency = 270000000, .driver_data = PLLVAL(127, 1, 1), }, }; static int s3c2410_plls_add(struct device *dev, struct subsys_interface *sif) diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c index 673781758319..a19460e6e7b0 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c @@ -21,33 +21,33 @@ #include <plat/cpu-freq-core.h> static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = { - { .frequency = 75000000, .index = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */ - { .frequency = 80000000, .index = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */ - { .frequency = 90000000, .index = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */ - { .frequency = 100000000, .index = PLLVAL(0x5c, 1, 3), }, /* FVco 800.000000 */ 
- { .frequency = 110000000, .index = PLLVAL(0x66, 1, 3), }, /* FVco 880.000000 */ - { .frequency = 120000000, .index = PLLVAL(0x70, 1, 3), }, /* FVco 960.000000 */ - { .frequency = 150000000, .index = PLLVAL(0x75, 3, 2), }, /* FVco 600.000000 */ - { .frequency = 160000000, .index = PLLVAL(0x98, 4, 2), }, /* FVco 640.000000 */ - { .frequency = 170000000, .index = PLLVAL(0x4d, 1, 2), }, /* FVco 680.000000 */ - { .frequency = 180000000, .index = PLLVAL(0x70, 2, 2), }, /* FVco 720.000000 */ - { .frequency = 190000000, .index = PLLVAL(0x57, 1, 2), }, /* FVco 760.000000 */ - { .frequency = 200000000, .index = PLLVAL(0x5c, 1, 2), }, /* FVco 800.000000 */ - { .frequency = 210000000, .index = PLLVAL(0x84, 2, 2), }, /* FVco 840.000000 */ - { .frequency = 220000000, .index = PLLVAL(0x66, 1, 2), }, /* FVco 880.000000 */ - { .frequency = 230000000, .index = PLLVAL(0x6b, 1, 2), }, /* FVco 920.000000 */ - { .frequency = 240000000, .index = PLLVAL(0x70, 1, 2), }, /* FVco 960.000000 */ - { .frequency = 300000000, .index = PLLVAL(0x75, 3, 1), }, /* FVco 600.000000 */ - { .frequency = 310000000, .index = PLLVAL(0x93, 4, 1), }, /* FVco 620.000000 */ - { .frequency = 320000000, .index = PLLVAL(0x98, 4, 1), }, /* FVco 640.000000 */ - { .frequency = 330000000, .index = PLLVAL(0x66, 2, 1), }, /* FVco 660.000000 */ - { .frequency = 340000000, .index = PLLVAL(0x4d, 1, 1), }, /* FVco 680.000000 */ - { .frequency = 350000000, .index = PLLVAL(0xa7, 4, 1), }, /* FVco 700.000000 */ - { .frequency = 360000000, .index = PLLVAL(0x70, 2, 1), }, /* FVco 720.000000 */ - { .frequency = 370000000, .index = PLLVAL(0xb1, 4, 1), }, /* FVco 740.000000 */ - { .frequency = 380000000, .index = PLLVAL(0x57, 1, 1), }, /* FVco 760.000000 */ - { .frequency = 390000000, .index = PLLVAL(0x7a, 2, 1), }, /* FVco 780.000000 */ - { .frequency = 400000000, .index = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */ + { .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */ + { .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */ + { .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */ + { .frequency = 100000000, .driver_data = PLLVAL(0x5c, 1, 3), }, /* FVco 800.000000 */ + { .frequency = 110000000, .driver_data = PLLVAL(0x66, 1, 3), }, /* FVco 880.000000 */ + { .frequency = 120000000, .driver_data = PLLVAL(0x70, 1, 3), }, /* FVco 960.000000 */ + { .frequency = 150000000, .driver_data = PLLVAL(0x75, 3, 2), }, /* FVco 600.000000 */ + { .frequency = 160000000, .driver_data = PLLVAL(0x98, 4, 2), }, /* FVco 640.000000 */ + { .frequency = 170000000, .driver_data = PLLVAL(0x4d, 1, 2), }, /* FVco 680.000000 */ + { .frequency = 180000000, .driver_data = PLLVAL(0x70, 2, 2), }, /* FVco 720.000000 */ + { .frequency = 190000000, .driver_data = PLLVAL(0x57, 1, 2), }, /* FVco 760.000000 */ + { .frequency = 200000000, .driver_data = PLLVAL(0x5c, 1, 2), }, /* FVco 800.000000 */ + { .frequency = 210000000, .driver_data = PLLVAL(0x84, 2, 2), }, /* FVco 840.000000 */ + { .frequency = 220000000, .driver_data = PLLVAL(0x66, 1, 2), }, /* FVco 880.000000 */ + { .frequency = 230000000, .driver_data = PLLVAL(0x6b, 1, 2), }, /* FVco 920.000000 */ + { .frequency = 240000000, .driver_data = PLLVAL(0x70, 1, 2), }, /* FVco 960.000000 */ + { .frequency = 300000000, .driver_data = PLLVAL(0x75, 3, 1), }, /* FVco 600.000000 */ + { .frequency = 310000000, .driver_data = PLLVAL(0x93, 4, 1), }, /* FVco 620.000000 */ + { .frequency = 320000000, .driver_data = PLLVAL(0x98, 4, 1), }, /* FVco 640.000000 */ 
+ { .frequency = 330000000, .driver_data = PLLVAL(0x66, 2, 1), }, /* FVco 660.000000 */ + { .frequency = 340000000, .driver_data = PLLVAL(0x4d, 1, 1), }, /* FVco 680.000000 */ + { .frequency = 350000000, .driver_data = PLLVAL(0xa7, 4, 1), }, /* FVco 700.000000 */ + { .frequency = 360000000, .driver_data = PLLVAL(0x70, 2, 1), }, /* FVco 720.000000 */ + { .frequency = 370000000, .driver_data = PLLVAL(0xb1, 4, 1), }, /* FVco 740.000000 */ + { .frequency = 380000000, .driver_data = PLLVAL(0x57, 1, 1), }, /* FVco 760.000000 */ + { .frequency = 390000000, .driver_data = PLLVAL(0x7a, 2, 1), }, /* FVco 780.000000 */ + { .frequency = 400000000, .driver_data = PLLVAL(0x5c, 1, 1), }, /* FVco 800.000000 */ }; static int s3c2440_plls12_add(struct device *dev, struct subsys_interface *sif) diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c index debfa106289b..1191b2905625 100644 --- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c +++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c @@ -21,61 +21,61 @@ #include <plat/cpu-freq-core.h> static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = { - { .frequency = 78019200, .index = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */ - { .frequency = 84067200, .index = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */ - { .frequency = 90115200, .index = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */ - { .frequency = 96163200, .index = PLLVAL(151, 5, 3), }, /* FVco 769.305600 */ - { .frequency = 102135600, .index = PLLVAL(185, 6, 3), }, /* FVco 817.084800 */ - { .frequency = 108259200, .index = PLLVAL(171, 5, 3), }, /* FVco 866.073600 */ - { .frequency = 114307200, .index = PLLVAL(127, 3, 3), }, /* FVco 914.457600 */ - { .frequency = 120234240, .index = PLLVAL(134, 3, 3), }, /* FVco 961.873920 */ - { .frequency = 126161280, .index = PLLVAL(141, 3, 3), }, /* FVco 1009.290240 */ - { .frequency = 132088320, .index = PLLVAL(148, 3, 3), }, /* FVco 1056.706560 */ - { .frequency = 138015360, .index = PLLVAL(155, 3, 3), }, /* FVco 1104.122880 */ - { .frequency = 144789120, .index = PLLVAL(163, 3, 3), }, /* FVco 1158.312960 */ - { .frequency = 150100363, .index = PLLVAL(187, 9, 2), }, /* FVco 600.401454 */ - { .frequency = 156038400, .index = PLLVAL(121, 5, 2), }, /* FVco 624.153600 */ - { .frequency = 162086400, .index = PLLVAL(126, 5, 2), }, /* FVco 648.345600 */ - { .frequency = 168134400, .index = PLLVAL(131, 5, 2), }, /* FVco 672.537600 */ - { .frequency = 174048000, .index = PLLVAL(177, 7, 2), }, /* FVco 696.192000 */ - { .frequency = 180230400, .index = PLLVAL(141, 5, 2), }, /* FVco 720.921600 */ - { .frequency = 186278400, .index = PLLVAL(124, 4, 2), }, /* FVco 745.113600 */ - { .frequency = 192326400, .index = PLLVAL(151, 5, 2), }, /* FVco 769.305600 */ - { .frequency = 198132480, .index = PLLVAL(109, 3, 2), }, /* FVco 792.529920 */ - { .frequency = 204271200, .index = PLLVAL(185, 6, 2), }, /* FVco 817.084800 */ - { .frequency = 210268800, .index = PLLVAL(141, 4, 2), }, /* FVco 841.075200 */ - { .frequency = 216518400, .index = PLLVAL(171, 5, 2), }, /* FVco 866.073600 */ - { .frequency = 222264000, .index = PLLVAL(97, 2, 2), }, /* FVco 889.056000 */ - { .frequency = 228614400, .index = PLLVAL(127, 3, 2), }, /* FVco 914.457600 */ - { .frequency = 234259200, .index = PLLVAL(158, 4, 2), }, /* FVco 937.036800 */ - { .frequency = 240468480, .index = PLLVAL(134, 3, 2), }, /* FVco 961.873920 */ - { .frequency = 246960000, .index = PLLVAL(167, 4, 2), }, /* FVco 987.840000 */ - { .frequency = 252322560, .index = 
PLLVAL(141, 3, 2), }, /* FVco 1009.290240 */ - { .frequency = 258249600, .index = PLLVAL(114, 2, 2), }, /* FVco 1032.998400 */ - { .frequency = 264176640, .index = PLLVAL(148, 3, 2), }, /* FVco 1056.706560 */ - { .frequency = 270950400, .index = PLLVAL(120, 2, 2), }, /* FVco 1083.801600 */ - { .frequency = 276030720, .index = PLLVAL(155, 3, 2), }, /* FVco 1104.122880 */ - { .frequency = 282240000, .index = PLLVAL(92, 1, 2), }, /* FVco 1128.960000 */ - { .frequency = 289578240, .index = PLLVAL(163, 3, 2), }, /* FVco 1158.312960 */ - { .frequency = 294235200, .index = PLLVAL(131, 2, 2), }, /* FVco 1176.940800 */ - { .frequency = 300200727, .index = PLLVAL(187, 9, 1), }, /* FVco 600.401454 */ - { .frequency = 306358690, .index = PLLVAL(191, 9, 1), }, /* FVco 612.717380 */ - { .frequency = 312076800, .index = PLLVAL(121, 5, 1), }, /* FVco 624.153600 */ - { .frequency = 318366720, .index = PLLVAL(86, 3, 1), }, /* FVco 636.733440 */ - { .frequency = 324172800, .index = PLLVAL(126, 5, 1), }, /* FVco 648.345600 */ - { .frequency = 330220800, .index = PLLVAL(109, 4, 1), }, /* FVco 660.441600 */ - { .frequency = 336268800, .index = PLLVAL(131, 5, 1), }, /* FVco 672.537600 */ - { .frequency = 342074880, .index = PLLVAL(93, 3, 1), }, /* FVco 684.149760 */ - { .frequency = 348096000, .index = PLLVAL(177, 7, 1), }, /* FVco 696.192000 */ - { .frequency = 355622400, .index = PLLVAL(118, 4, 1), }, /* FVco 711.244800 */ - { .frequency = 360460800, .index = PLLVAL(141, 5, 1), }, /* FVco 720.921600 */ - { .frequency = 366206400, .index = PLLVAL(165, 6, 1), }, /* FVco 732.412800 */ - { .frequency = 372556800, .index = PLLVAL(124, 4, 1), }, /* FVco 745.113600 */ - { .frequency = 378201600, .index = PLLVAL(126, 4, 1), }, /* FVco 756.403200 */ - { .frequency = 384652800, .index = PLLVAL(151, 5, 1), }, /* FVco 769.305600 */ - { .frequency = 391608000, .index = PLLVAL(177, 6, 1), }, /* FVco 783.216000 */ - { .frequency = 396264960, .index = PLLVAL(109, 3, 1), }, /* FVco 792.529920 */ - { .frequency = 402192000, .index = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */ + { .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */ + { .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */ + { .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */ + { .frequency = 96163200, .driver_data = PLLVAL(151, 5, 3), }, /* FVco 769.305600 */ + { .frequency = 102135600, .driver_data = PLLVAL(185, 6, 3), }, /* FVco 817.084800 */ + { .frequency = 108259200, .driver_data = PLLVAL(171, 5, 3), }, /* FVco 866.073600 */ + { .frequency = 114307200, .driver_data = PLLVAL(127, 3, 3), }, /* FVco 914.457600 */ + { .frequency = 120234240, .driver_data = PLLVAL(134, 3, 3), }, /* FVco 961.873920 */ + { .frequency = 126161280, .driver_data = PLLVAL(141, 3, 3), }, /* FVco 1009.290240 */ + { .frequency = 132088320, .driver_data = PLLVAL(148, 3, 3), }, /* FVco 1056.706560 */ + { .frequency = 138015360, .driver_data = PLLVAL(155, 3, 3), }, /* FVco 1104.122880 */ + { .frequency = 144789120, .driver_data = PLLVAL(163, 3, 3), }, /* FVco 1158.312960 */ + { .frequency = 150100363, .driver_data = PLLVAL(187, 9, 2), }, /* FVco 600.401454 */ + { .frequency = 156038400, .driver_data = PLLVAL(121, 5, 2), }, /* FVco 624.153600 */ + { .frequency = 162086400, .driver_data = PLLVAL(126, 5, 2), }, /* FVco 648.345600 */ + { .frequency = 168134400, .driver_data = PLLVAL(131, 5, 2), }, /* FVco 672.537600 */ + { .frequency = 174048000, .driver_data = PLLVAL(177, 7, 2), }, /* FVco 696.192000 */ + 
{ .frequency = 180230400, .driver_data = PLLVAL(141, 5, 2), }, /* FVco 720.921600 */ + { .frequency = 186278400, .driver_data = PLLVAL(124, 4, 2), }, /* FVco 745.113600 */ + { .frequency = 192326400, .driver_data = PLLVAL(151, 5, 2), }, /* FVco 769.305600 */ + { .frequency = 198132480, .driver_data = PLLVAL(109, 3, 2), }, /* FVco 792.529920 */ + { .frequency = 204271200, .driver_data = PLLVAL(185, 6, 2), }, /* FVco 817.084800 */ + { .frequency = 210268800, .driver_data = PLLVAL(141, 4, 2), }, /* FVco 841.075200 */ + { .frequency = 216518400, .driver_data = PLLVAL(171, 5, 2), }, /* FVco 866.073600 */ + { .frequency = 222264000, .driver_data = PLLVAL(97, 2, 2), }, /* FVco 889.056000 */ + { .frequency = 228614400, .driver_data = PLLVAL(127, 3, 2), }, /* FVco 914.457600 */ + { .frequency = 234259200, .driver_data = PLLVAL(158, 4, 2), }, /* FVco 937.036800 */ + { .frequency = 240468480, .driver_data = PLLVAL(134, 3, 2), }, /* FVco 961.873920 */ + { .frequency = 246960000, .driver_data = PLLVAL(167, 4, 2), }, /* FVco 987.840000 */ + { .frequency = 252322560, .driver_data = PLLVAL(141, 3, 2), }, /* FVco 1009.290240 */ + { .frequency = 258249600, .driver_data = PLLVAL(114, 2, 2), }, /* FVco 1032.998400 */ + { .frequency = 264176640, .driver_data = PLLVAL(148, 3, 2), }, /* FVco 1056.706560 */ + { .frequency = 270950400, .driver_data = PLLVAL(120, 2, 2), }, /* FVco 1083.801600 */ + { .frequency = 276030720, .driver_data = PLLVAL(155, 3, 2), }, /* FVco 1104.122880 */ + { .frequency = 282240000, .driver_data = PLLVAL(92, 1, 2), }, /* FVco 1128.960000 */ + { .frequency = 289578240, .driver_data = PLLVAL(163, 3, 2), }, /* FVco 1158.312960 */ + { .frequency = 294235200, .driver_data = PLLVAL(131, 2, 2), }, /* FVco 1176.940800 */ + { .frequency = 300200727, .driver_data = PLLVAL(187, 9, 1), }, /* FVco 600.401454 */ + { .frequency = 306358690, .driver_data = PLLVAL(191, 9, 1), }, /* FVco 612.717380 */ + { .frequency = 312076800, .driver_data = PLLVAL(121, 5, 1), }, /* FVco 624.153600 */ + { .frequency = 318366720, .driver_data = PLLVAL(86, 3, 1), }, /* FVco 636.733440 */ + { .frequency = 324172800, .driver_data = PLLVAL(126, 5, 1), }, /* FVco 648.345600 */ + { .frequency = 330220800, .driver_data = PLLVAL(109, 4, 1), }, /* FVco 660.441600 */ + { .frequency = 336268800, .driver_data = PLLVAL(131, 5, 1), }, /* FVco 672.537600 */ + { .frequency = 342074880, .driver_data = PLLVAL(93, 3, 1), }, /* FVco 684.149760 */ + { .frequency = 348096000, .driver_data = PLLVAL(177, 7, 1), }, /* FVco 696.192000 */ + { .frequency = 355622400, .driver_data = PLLVAL(118, 4, 1), }, /* FVco 711.244800 */ + { .frequency = 360460800, .driver_data = PLLVAL(141, 5, 1), }, /* FVco 720.921600 */ + { .frequency = 366206400, .driver_data = PLLVAL(165, 6, 1), }, /* FVco 732.412800 */ + { .frequency = 372556800, .driver_data = PLLVAL(124, 4, 1), }, /* FVco 745.113600 */ + { .frequency = 378201600, .driver_data = PLLVAL(126, 4, 1), }, /* FVco 756.403200 */ + { .frequency = 384652800, .driver_data = PLLVAL(151, 5, 1), }, /* FVco 769.305600 */ + { .frequency = 391608000, .driver_data = PLLVAL(177, 6, 1), }, /* FVco 783.216000 */ + { .frequency = 396264960, .driver_data = PLLVAL(109, 3, 1), }, /* FVco 792.529920 */ + { .frequency = 402192000, .driver_data = PLLVAL(87, 2, 1), }, /* FVco 804.384000 */ }; static int s3c2440_plls169344_add(struct device *dev, diff --git a/arch/arm/mach-s3c24xx/s3c2410.c b/arch/arm/mach-s3c24xx/s3c2410.c index ff384acc65b2..34676d1d5fec 100644 --- a/arch/arm/mach-s3c24xx/s3c2410.c +++ 
b/arch/arm/mach-s3c24xx/s3c2410.c @@ -22,6 +22,7 @@ #include <linux/syscore_ops.h> #include <linux/serial_core.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/io.h> #include <asm/mach/arch.h> @@ -196,9 +197,9 @@ int __init s3c2410a_init(void) return s3c2410_init(); } -void s3c2410_restart(char mode, const char *cmd) +void s3c2410_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { soft_restart(0); } diff --git a/arch/arm/mach-s3c24xx/s3c2412.c b/arch/arm/mach-s3c24xx/s3c2412.c index 0f864d4c97de..0251650cbf80 100644 --- a/arch/arm/mach-s3c24xx/s3c2412.c +++ b/arch/arm/mach-s3c24xx/s3c2412.c @@ -22,6 +22,7 @@ #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/reboot.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -129,9 +130,9 @@ static void s3c2412_idle(void) cpu_do_idle(); } -void s3c2412_restart(char mode, const char *cmd) +void s3c2412_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') + if (mode == REBOOT_SOFT) soft_restart(0); /* errata "Watch-dog/Software Reset Problem" specifies that diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c index b9c5d382dafb..9ef3ccfbe196 100644 --- a/arch/arm/mach-s3c24xx/s3c2416.c +++ b/arch/arm/mach-s3c24xx/s3c2416.c @@ -35,6 +35,7 @@ #include <linux/syscore_ops.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/reboot.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -79,9 +80,9 @@ static struct device s3c2416_dev = { .bus = &s3c2416_subsys, }; -void s3c2416_restart(char mode, const char *cmd) +void s3c2416_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') + if (mode == REBOOT_SOFT) soft_restart(0); __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST); diff --git a/arch/arm/mach-s3c24xx/s3c2443.c b/arch/arm/mach-s3c24xx/s3c2443.c index 8328cd65bf3d..b6c71918b25c 100644 --- a/arch/arm/mach-s3c24xx/s3c2443.c +++ b/arch/arm/mach-s3c24xx/s3c2443.c @@ -22,6 +22,7 @@ #include <linux/device.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/reboot.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -59,9 +60,9 @@ static struct device s3c2443_dev = { .bus = &s3c2443_subsys, }; -void s3c2443_restart(char mode, const char *cmd) +void s3c2443_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') + if (mode == REBOOT_SOFT) soft_restart(0); __raw_writel(S3C2443_SWRST_RESET, S3C2443_SWRST); diff --git a/arch/arm/mach-s3c24xx/s3c244x.c b/arch/arm/mach-s3c24xx/s3c244x.c index d0423e2544c1..911b555029fc 100644 --- a/arch/arm/mach-s3c24xx/s3c244x.c +++ b/arch/arm/mach-s3c24xx/s3c244x.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/serial_core.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/device.h> #include <linux/syscore_ops.h> #include <linux/clk.h> @@ -198,9 +199,9 @@ struct syscore_ops s3c244x_pm_syscore_ops = { .resume = s3c244x_resume, }; -void s3c244x_restart(char mode, const char *cmd) +void s3c244x_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') + if (mode == REBOOT_SOFT) soft_restart(0); samsung_wdt_reset(); diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c index 1aed6f4be1ce..3f62e467b129 100644 --- a/arch/arm/mach-s3c64xx/common.c +++ b/arch/arm/mach-s3c64xx/common.c @@ -21,6 +21,7 @@ #include <linux/ioport.h> #include <linux/serial_core.h> #include <linux/platform_device.h> +#include <linux/reboot.h> 
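/*
 * Aside: every conversion above follows the same pattern -- the machine
 * restart hook now takes an enum reboot_mode (from <linux/reboot.h>, hence
 * the new includes) instead of a mode character. A minimal, purely
 * illustrative handler in the new form, composed from the S3C/S5P handlers
 * in this diff rather than taken from any one board file:
 */
static void board_restart(enum reboot_mode mode, const char *cmd)
{
        if (mode == REBOOT_SOFT)
                soft_restart(0);        /* jump back into ROM at address 0 */
        else
                samsung_wdt_reset();    /* let the watchdog force a hard reset */
}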
#include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/irq.h> @@ -381,9 +382,9 @@ static int __init s3c64xx_init_irq_eint(void) } arch_initcall(s3c64xx_init_irq_eint); -void s3c64xx_restart(char mode, const char *cmd) +void s3c64xx_restart(enum reboot_mode mode, const char *cmd) { - if (mode != 's') + if (mode != REBOOT_SOFT) samsung_wdt_reset(); /* if all else fails, or mode was for soft, jump to 0 */ diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h index 6cfc99bdfb37..e8f990b37665 100644 --- a/arch/arm/mach-s3c64xx/common.h +++ b/arch/arm/mach-s3c64xx/common.h @@ -17,13 +17,15 @@ #ifndef __ARCH_ARM_MACH_S3C64XX_COMMON_H #define __ARCH_ARM_MACH_S3C64XX_COMMON_H +#include <linux/reboot.h> + void s3c64xx_init_irq(u32 vic0, u32 vic1); void s3c64xx_init_io(struct map_desc *mach_desc, int size); void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit); void s3c64xx_setup_clocks(void); -void s3c64xx_restart(char mode, const char *cmd); +void s3c64xx_restart(enum reboot_mode mode, const char *cmd); void s3c64xx_init_late(void); #ifdef CONFIG_CPU_S3C6400 diff --git a/arch/arm/mach-s5p64x0/common.c b/arch/arm/mach-s5p64x0/common.c index 76d0053bf564..dfdfdc320ce7 100644 --- a/arch/arm/mach-s5p64x0/common.c +++ b/arch/arm/mach-s5p64x0/common.c @@ -24,6 +24,7 @@ #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/irq.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/proc-fns.h> @@ -439,9 +440,9 @@ static int __init s5p64x0_init_irq_eint(void) } arch_initcall(s5p64x0_init_irq_eint); -void s5p64x0_restart(char mode, const char *cmd) +void s5p64x0_restart(enum reboot_mode mode, const char *cmd) { - if (mode != 's') + if (mode != REBOOT_SOFT) samsung_wdt_reset(); soft_restart(0); diff --git a/arch/arm/mach-s5p64x0/common.h b/arch/arm/mach-s5p64x0/common.h index f8a60fdc5884..f3a9b43cba4a 100644 --- a/arch/arm/mach-s5p64x0/common.h +++ b/arch/arm/mach-s5p64x0/common.h @@ -12,6 +12,8 @@ #ifndef __ARCH_ARM_MACH_S5P64X0_COMMON_H #define __ARCH_ARM_MACH_S5P64X0_COMMON_H +#include <linux/reboot.h> + void s5p6440_init_irq(void); void s5p6450_init_irq(void); void s5p64x0_init_io(struct map_desc *mach_desc, int size); @@ -22,7 +24,7 @@ void s5p6440_setup_clocks(void); void s5p6450_register_clocks(void); void s5p6450_setup_clocks(void); -void s5p64x0_restart(char mode, const char *cmd); +void s5p64x0_restart(enum reboot_mode mode, const char *cmd); #ifdef CONFIG_CPU_S5P6440 diff --git a/arch/arm/mach-s5pc100/common.c b/arch/arm/mach-s5pc100/common.c index 511031564d35..4bdfecf6d024 100644 --- a/arch/arm/mach-s5pc100/common.c +++ b/arch/arm/mach-s5pc100/common.c @@ -24,6 +24,7 @@ #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/sched.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/proc-fns.h> @@ -217,9 +218,9 @@ void __init s5pc100_init_uarts(struct s3c2410_uartcfg *cfg, int no) s3c24xx_init_uartdevs("s3c6400-uart", s5p_uart_resources, cfg, no); } -void s5pc100_restart(char mode, const char *cmd) +void s5pc100_restart(enum reboot_mode mode, const char *cmd) { - if (mode != 's') + if (mode != REBOOT_SOFT) samsung_wdt_reset(); soft_restart(0); diff --git a/arch/arm/mach-s5pc100/common.h b/arch/arm/mach-s5pc100/common.h index c41f912e9e1f..08d782d65d7b 100644 --- a/arch/arm/mach-s5pc100/common.h +++ b/arch/arm/mach-s5pc100/common.h @@ -12,13 +12,15 @@ #ifndef __ARCH_ARM_MACH_S5PC100_COMMON_H #define __ARCH_ARM_MACH_S5PC100_COMMON_H +#include <linux/reboot.h> + void 
s5pc100_init_io(struct map_desc *mach_desc, int size); void s5pc100_init_irq(void); void s5pc100_register_clocks(void); void s5pc100_setup_clocks(void); -void s5pc100_restart(char mode, const char *cmd); +void s5pc100_restart(enum reboot_mode mode, const char *cmd); extern int s5pc100_init(void); extern void s5pc100_map_io(void); diff --git a/arch/arm/mach-s5pv210/common.c b/arch/arm/mach-s5pv210/common.c index 9dfe93e2624d..023f1a796a9c 100644 --- a/arch/arm/mach-s5pv210/common.c +++ b/arch/arm/mach-s5pv210/common.c @@ -143,7 +143,7 @@ static struct map_desc s5pv210_iodesc[] __initdata = { } }; -void s5pv210_restart(char mode, const char *cmd) +void s5pv210_restart(enum reboot_mode mode, const char *cmd) { __raw_writel(0x1, S5P_SWRESET); } diff --git a/arch/arm/mach-s5pv210/common.h b/arch/arm/mach-s5pv210/common.h index 0a1cc0aef720..fe1beb54e548 100644 --- a/arch/arm/mach-s5pv210/common.h +++ b/arch/arm/mach-s5pv210/common.h @@ -12,13 +12,15 @@ #ifndef __ARCH_ARM_MACH_S5PV210_COMMON_H #define __ARCH_ARM_MACH_S5PV210_COMMON_H +#include <linux/reboot.h> + void s5pv210_init_io(struct map_desc *mach_desc, int size); void s5pv210_init_irq(void); void s5pv210_register_clocks(void); void s5pv210_setup_clocks(void); -void s5pv210_restart(char mode, const char *cmd); +void s5pv210_restart(enum reboot_mode mode, const char *cmd); extern int s5pv210_init(void); extern void s5pv210_map_io(void); diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c index ed2b85485b9d..ad40ab0f5dbd 100644 --- a/arch/arm/mach-s5pv210/mach-aquila.c +++ b/arch/arm/mach-s5pv210/mach-aquila.c @@ -377,12 +377,8 @@ static struct max8998_platform_data aquila_max8998_pdata = { .buck1_set1 = S5PV210_GPH0(3), .buck1_set2 = S5PV210_GPH0(4), .buck2_set3 = S5PV210_GPH0(5), - .buck1_voltage1 = 1200000, - .buck1_voltage2 = 1200000, - .buck1_voltage3 = 1200000, - .buck1_voltage4 = 1200000, - .buck2_voltage1 = 1200000, - .buck2_voltage2 = 1200000, + .buck1_voltage = { 1200000, 1200000, 1200000, 1200000 }, + .buck2_voltage = { 1200000, 1200000 }, }; #endif diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index 30b24ad84f49..e5cd9fbf19e9 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -580,12 +580,8 @@ static struct max8998_platform_data goni_max8998_pdata = { .buck1_set1 = S5PV210_GPH0(3), .buck1_set2 = S5PV210_GPH0(4), .buck2_set3 = S5PV210_GPH0(5), - .buck1_voltage1 = 1200000, - .buck1_voltage2 = 1200000, - .buck1_voltage3 = 1200000, - .buck1_voltage4 = 1200000, - .buck2_voltage1 = 1200000, - .buck2_voltage2 = 1200000, + .buck1_voltage = { 1200000, 1200000, 1200000, 1200000 }, + .buck2_voltage = { 1200000, 1200000 }, }; #endif diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 9db3e98e8b85..f25b6119e028 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -19,6 +19,7 @@ #include <linux/cpufreq.h> #include <linux/ioport.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <video/sa1100fb.h> @@ -131,9 +132,9 @@ static void sa1100_power_off(void) PMCR = PMCR_SF; } -void sa11x0_restart(char mode, const char *cmd) +void sa11x0_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* Jump into ROM at address 0 */ soft_restart(0); } else { diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h index 2abc6a1f6e86..9a33695c9492 100644 --- a/arch/arm/mach-sa1100/generic.h +++ 
b/arch/arm/mach-sa1100/generic.h @@ -3,12 +3,13 @@ * * Author: Nicolas Pitre */ +#include <linux/reboot.h> extern void sa1100_timer_init(void); extern void __init sa1100_map_io(void); extern void __init sa1100_init_irq(void); extern void __init sa1100_init_gpio(void); -extern void sa11x0_restart(char, const char *); +extern void sa11x0_restart(enum reboot_mode, const char *); extern void sa11x0_init_late(void); #define SET_BANK(__nr,__start,__size) \ diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c index a59a13a665a6..713c86cd3d64 100644 --- a/arch/arm/mach-sa1100/time.c +++ b/arch/arm/mach-sa1100/time.c @@ -14,9 +14,9 @@ #include <linux/irq.h> #include <linux/timex.h> #include <linux/clockchips.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> -#include <asm/sched_clock.h> #include <mach/hardware.h> #include <mach/irqs.h> diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c index 153555724988..1d32c5e8eab6 100644 --- a/arch/arm/mach-shark/core.c +++ b/arch/arm/mach-shark/core.c @@ -11,6 +11,7 @@ #include <linux/serial_8250.h> #include <linux/io.h> #include <linux/cpu.h> +#include <linux/reboot.h> #include <asm/setup.h> #include <asm/mach-types.h> @@ -24,7 +25,7 @@ #define ROMCARD_SIZE 0x08000000 #define ROMCARD_START 0x10000000 -static void shark_restart(char mode, const char *cmd) +static void shark_restart(enum reboot_mode mode, const char *cmd) { short temp; /* Reset the Machine via pc[3] of the sequoia chipset */ diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index db27e8eef192..3912ce91fee4 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -23,7 +23,7 @@ config ARCH_R8A73A4 select ARCH_WANT_OPTIONAL_GPIOLIB select ARM_GIC select CPU_V7 - select ARM_ARCH_TIMER + select HAVE_ARM_ARCH_TIMER select SH_CLK_CPG select RENESAS_IRQC @@ -59,7 +59,7 @@ config ARCH_R8A7790 select ARCH_WANT_OPTIONAL_GPIOLIB select ARM_GIC select CPU_V7 - select ARM_ARCH_TIMER + select HAVE_ARM_ARCH_TIMER select SH_CLK_CPG select RENESAS_IRQC diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 44a621505eeb..e115f6742107 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -42,6 +42,7 @@ #include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/i2c-gpio.h> +#include <linux/reboot.h> #include <mach/common.h> #include <mach/irqs.h> #include <mach/r8a7740.h> @@ -377,7 +378,7 @@ static struct resource sh_eth_resources[] = { }; static struct platform_device sh_eth_device = { - .name = "sh-eth", + .name = "r8a7740-gether", .id = -1, .dev = { .platform_data = &sh_eth_platdata, @@ -1259,7 +1260,7 @@ static void __init eva_add_early_devices(void) } #define RESCNT2 IOMEM(0xe6188020) -static void eva_restart(char mode, const char *cmd) +static void eva_restart(enum reboot_mode mode, const char *cmd) { /* Do soft power on reset */ writel((1 << 31), RESCNT2); diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index 165483c9bee2..1068120d339f 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c @@ -34,6 +34,7 @@ #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/smsc911x.h> @@ -890,7 +891,7 @@ static void __init 
kzm_init(void) sh73a0_pm_init(); } -static void kzm9g_restart(char mode, const char *cmd) +static void kzm9g_restart(enum reboot_mode mode, const char *cmd) { #define RESCNT2 IOMEM(0xe6188020) /* Do soft power on reset */ diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c index 7fd32d604e34..de10fd78bf2b 100644 --- a/arch/arm/mach-shmobile/clock-r8a7740.c +++ b/arch/arm/mach-shmobile/clock-r8a7740.c @@ -594,7 +594,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("e6860000.sdhi", &mstp_clks[MSTP313]), CLKDEV_DEV_ID("sh_mmcif", &mstp_clks[MSTP312]), CLKDEV_DEV_ID("e6bd0000.mmcif", &mstp_clks[MSTP312]), - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP309]), + CLKDEV_DEV_ID("r8a7740-gether", &mstp_clks[MSTP309]), CLKDEV_DEV_ID("e9a00000.sh-eth", &mstp_clks[MSTP309]), CLKDEV_DEV_ID("renesas_tpu_pwm", &mstp_clks[MSTP304]), diff --git a/arch/arm/mach-shmobile/clock-r8a7778.c b/arch/arm/mach-shmobile/clock-r8a7778.c index 53798e5037d7..a0e9eb72e46d 100644 --- a/arch/arm/mach-shmobile/clock-r8a7778.c +++ b/arch/arm/mach-shmobile/clock-r8a7778.c @@ -145,7 +145,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP323]), /* SDHI0 */ CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP322]), /* SDHI1 */ CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP321]), /* SDHI2 */ - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP114]), /* Ether */ + CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */ CLKDEV_DEV_ID("ehci-platform", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */ CLKDEV_DEV_ID("ohci-platform", &mstp_clks[MSTP100]), /* USB OHCI port0/1 */ CLKDEV_DEV_ID("i2c-rcar.0", &mstp_clks[MSTP030]), /* I2C0 */ diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c index 9daeb8c37483..10340f5becbb 100644 --- a/arch/arm/mach-shmobile/clock-r8a7779.c +++ b/arch/arm/mach-shmobile/clock-r8a7779.c @@ -165,7 +165,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("rcar-pcie", &mstp_clks[MSTP116]), /* PCIe */ CLKDEV_DEV_ID("sata_rcar", &mstp_clks[MSTP115]), /* SATA */ CLKDEV_DEV_ID("fc600000.sata", &mstp_clks[MSTP115]), /* SATA w/DT */ - CLKDEV_DEV_ID("sh-eth", &mstp_clks[MSTP114]), /* Ether */ + CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */ CLKDEV_DEV_ID("ehci-platform.1", &mstp_clks[MSTP101]), /* USB EHCI port2 */ CLKDEV_DEV_ID("ohci-platform.1", &mstp_clks[MSTP101]), /* USB OHCI port2 */ CLKDEV_DEV_ID("ehci-platform.0", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */ diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index 7e105932c09d..5390c6bbbc02 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c @@ -142,15 +142,15 @@ static void pllc2_table_rebuild(struct clk *clk) /* Initialise PLLC2 frequency table */ for (i = 0; i < ARRAY_SIZE(pllc2_freq_table) - 2; i++) { pllc2_freq_table[i].frequency = clk->parent->rate * (i + 20) * 2; - pllc2_freq_table[i].index = i; + pllc2_freq_table[i].driver_data = i; } /* This is a special entry - switching PLL off makes it a repeater */ pllc2_freq_table[i].frequency = clk->parent->rate; - pllc2_freq_table[i].index = i; + pllc2_freq_table[i].driver_data = i; pllc2_freq_table[++i].frequency = CPUFREQ_TABLE_END; - pllc2_freq_table[i].index = i; + pllc2_freq_table[i].driver_data = i; } static unsigned long pllc2_recalc(struct clk *clk) diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 8ea11b472b91..bfce9641e32f 100644 --- 
a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -19,6 +19,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/reboot.h> #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> @@ -89,13 +90,13 @@ static void __init socfpga_init_irq(void) socfpga_sysmgr_init(); } -static void socfpga_cyclone5_restart(char mode, const char *cmd) +static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) { u32 temp; temp = readl(rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL); - if (mode == 'h') + if (mode == REBOOT_HARD) temp |= RSTMGR_CTRL_SWCOLDRSTREQ; else temp |= RSTMGR_CTRL_SWWARMRSTREQ; diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h index a9fd45362fee..904f2c907b46 100644 --- a/arch/arm/mach-spear/generic.h +++ b/arch/arm/mach-spear/generic.h @@ -16,6 +16,8 @@ #include <linux/dmaengine.h> #include <linux/amba/pl08x.h> #include <linux/init.h> +#include <linux/reboot.h> + #include <asm/mach/time.h> extern void spear13xx_timer_init(void); @@ -32,7 +34,7 @@ void __init spear6xx_clk_init(void __iomem *misc_base); void __init spear13xx_map_io(void); void __init spear13xx_l2x0_init(void); -void spear_restart(char, const char *); +void spear_restart(enum reboot_mode, const char *); void spear13xx_secondary_startup(void); void __cpuinit spear13xx_cpu_die(unsigned int cpu); diff --git a/arch/arm/mach-spear/restart.c b/arch/arm/mach-spear/restart.c index 2b44500bb718..ce5e098c4888 100644 --- a/arch/arm/mach-spear/restart.c +++ b/arch/arm/mach-spear/restart.c @@ -12,14 +12,15 @@ */ #include <linux/io.h> #include <linux/amba/sp810.h> +#include <linux/reboot.h> #include <asm/system_misc.h> #include <mach/spear.h> #include "generic.h" #define SPEAR13XX_SYS_SW_RES (VA_MISC_BASE + 0x204) -void spear_restart(char mode, const char *cmd) +void spear_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* software reset, Jump into ROM at address 0 */ soft_restart(0); } else { diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c index 0227c97797cd..bf3b1fd8cb23 100644 --- a/arch/arm/mach-spear/spear3xx.c +++ b/arch/arm/mach-spear/spear3xx.c @@ -56,8 +56,8 @@ struct pl08x_platform_data pl080_plat_data = { }, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, - .get_signal = pl080_get_signal, - .put_signal = pl080_put_signal, + .get_xfer_signal = pl080_get_signal, + .put_xfer_signal = pl080_put_signal, }; /* diff --git a/arch/arm/mach-spear/spear6xx.c b/arch/arm/mach-spear/spear6xx.c index 8b0295a41226..da26fa5b68d7 100644 --- a/arch/arm/mach-spear/spear6xx.c +++ b/arch/arm/mach-spear/spear6xx.c @@ -334,8 +334,8 @@ static struct pl08x_platform_data spear6xx_pl080_plat_data = { }, .lli_buses = PL08X_AHB1, .mem_buses = PL08X_AHB1, - .get_signal = pl080_get_signal, - .put_signal = pl080_put_signal, + .get_xfer_signal = pl080_get_signal, + .put_xfer_signal = pl080_put_signal, .slave_channels = spear600_dma_info, .num_slave_channels = ARRAY_SIZE(spear600_dma_info), }; diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 84485a10fc3a..38a3c55527c8 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c @@ -18,6 +18,7 @@ #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/io.h> +#include <linux/reboot.h> #include <linux/clk/sunxi.h> @@ -33,7 +34,7 @@ static void __iomem *wdt_base; -static void sun4i_restart(char mode, const char *cmd) +static void sun4i_restart(enum 
reboot_mode mode, const char *cmd) { if (!wdt_base) return; diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 84d72fc36dfe..ef3a8da49b2d 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -28,7 +28,6 @@ config ARCH_TEGRA_2x_SOC select ARM_ERRATA_754327 if SMP select ARM_ERRATA_764369 if SMP select ARM_GIC - select CPU_FREQ_TABLE if CPU_FREQ select CPU_V7 select PINCTRL select PINCTRL_TEGRA20 @@ -46,7 +45,6 @@ config ARCH_TEGRA_3x_SOC select ARM_ERRATA_754322 select ARM_ERRATA_764369 if SMP select ARM_GIC - select CPU_FREQ_TABLE if CPU_FREQ select CPU_V7 select PINCTRL select PINCTRL_TEGRA30 @@ -60,10 +58,9 @@ config ARCH_TEGRA_3x_SOC config ARCH_TEGRA_114_SOC bool "Enable support for Tegra114 family" - select ARM_ARCH_TIMER + select HAVE_ARM_ARCH_TIMER select ARM_GIC select ARM_L1_CACHE_SHIFT_6 - select CPU_FREQ_TABLE if CPU_FREQ select CPU_V7 select PINCTRL select PINCTRL_TEGRA114 diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h index 1787327fae3a..9a6659fe2dc2 100644 --- a/arch/arm/mach-tegra/board.h +++ b/arch/arm/mach-tegra/board.h @@ -23,8 +23,9 @@ #define __MACH_TEGRA_BOARD_H #include <linux/types.h> +#include <linux/reboot.h> -void tegra_assert_system_reset(char mode, const char *cmd); +void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd); void __init tegra_init_early(void); void __init tegra_map_common_io(void); diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c index ec5836b1e713..94a119a35af8 100644 --- a/arch/arm/mach-tegra/common.c +++ b/arch/arm/mach-tegra/common.c @@ -22,8 +22,9 @@ #include <linux/io.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/reboot.h> #include <linux/irqchip.h> -#include <linux/clk/tegra.h> +#include <linux/clk-provider.h> #include <asm/hardware/cache-l2x0.h> @@ -60,7 +61,7 @@ u32 tegra_uart_config[4] = { #ifdef CONFIG_OF void __init tegra_dt_init_irq(void) { - tegra_clocks_init(); + of_clk_init(NULL); tegra_pmc_init(); tegra_init_irq(); irqchip_init(); @@ -68,7 +69,7 @@ void __init tegra_dt_init_irq(void) } #endif -void tegra_assert_system_reset(char mode, const char *cmd) +void tegra_assert_system_reset(enum reboot_mode mode, const char *cmd) { void __iomem *reset = IO_ADDRESS(TEGRA_PMC_BASE + 0); u32 reg; diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index 4f7ac2a11452..35670b15f281 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c @@ -300,11 +300,11 @@ static void __init u300_init_check_chip(void) /* Forward declare this function from the watchdog */ void coh901327_watchdog_reset(void); -static void u300_restart(char mode, const char *cmd) +static void u300_restart(enum reboot_mode mode, const char *cmd) { switch (mode) { - case 's': - case 'h': + case REBOOT_SOFT: + case REBOOT_HARD: #ifdef CONFIG_COH901327_WATCHDOG coh901327_watchdog_reset(); #endif diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c index 390ae5feb1d0..b5db207dfd1e 100644 --- a/arch/arm/mach-u300/timer.c +++ b/arch/arm/mach-u300/timer.c @@ -21,9 +21,9 @@ #include <linux/delay.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/sched_clock.h> /* Generic stuff */ -#include <asm/sched_clock.h> #include <asm/mach/map.h> #include <asm/mach/time.h> diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c index b6145ea51641..e6fb0239151b 100644 --- a/arch/arm/mach-ux500/cpu.c +++ b/arch/arm/mach-ux500/cpu.c @@ -76,13 +76,15 @@ void __init ux500_init_irq(void) } 
else if (cpu_is_u9540()) { prcmu_early_init(U8500_PRCMU_BASE, SZ_8K - 1); ux500_pm_init(U8500_PRCMU_BASE, SZ_8K - 1); - u8500_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, + u9540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, U8500_CLKRST6_BASE); } else if (cpu_is_u8540()) { prcmu_early_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1); ux500_pm_init(U8500_PRCMU_BASE, SZ_8K + SZ_4K - 1); - u8540_clk_init(); + u8540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE, + U8500_CLKRST3_BASE, U8500_CLKRST5_BASE, + U8500_CLKRST6_BASE); } } diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 54bb80b012ac..3b0572f30d56 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -38,6 +38,7 @@ #include <linux/clkdev.h> #include <linux/mtd/physmap.h> #include <linux/bitops.h> +#include <linux/reboot.h> #include <asm/irq.h> #include <asm/hardware/arm_timer.h> @@ -733,7 +734,7 @@ static void versatile_leds_event(led_event_t ledevt) } #endif /* CONFIG_LEDS */ -void versatile_restart(char mode, const char *cmd) +void versatile_restart(enum reboot_mode mode, const char *cmd) { void __iomem *sys = __io_address(VERSATILE_SYS_BASE); u32 val; diff --git a/arch/arm/mach-versatile/core.h b/arch/arm/mach-versatile/core.h index 5c1b87d1da6b..f06d5768e428 100644 --- a/arch/arm/mach-versatile/core.h +++ b/arch/arm/mach-versatile/core.h @@ -24,13 +24,14 @@ #include <linux/amba/bus.h> #include <linux/of_platform.h> +#include <linux/reboot.h> extern void __init versatile_init(void); extern void __init versatile_init_early(void); extern void __init versatile_init_irq(void); extern void __init versatile_map_io(void); extern void versatile_timer_init(void); -extern void versatile_restart(char, const char *); +extern void versatile_restart(enum reboot_mode, const char *); extern unsigned int mmc_status(struct device *dev); #ifdef CONFIG_OF extern struct of_dev_auxdata versatile_auxdata_lookup[]; diff --git a/arch/arm/mach-virt/Kconfig b/arch/arm/mach-virt/Kconfig index 8958f0d896bc..081d46929436 100644 --- a/arch/arm/mach-virt/Kconfig +++ b/arch/arm/mach-virt/Kconfig @@ -2,7 +2,7 @@ config ARCH_VIRT bool "Dummy Virtual Machine" if ARCH_MULTI_V7 select ARCH_WANT_OPTIONAL_GPIOLIB select ARM_GIC - select ARM_ARCH_TIMER + select HAVE_ARM_ARCH_TIMER select ARM_PSCI select HAVE_SMP select CPU_V7 diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c index f8f2f00856e0..eefaa60d6614 100644 --- a/arch/arm/mach-vt8500/vt8500.c +++ b/arch/arm/mach-vt8500/vt8500.c @@ -21,6 +21,7 @@ #include <linux/clocksource.h> #include <linux/io.h> #include <linux/pm.h> +#include <linux/reboot.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -46,7 +47,7 @@ static void __iomem *pmc_base; -void vt8500_restart(char mode, const char *cmd) +void vt8500_restart(enum reboot_mode mode, const char *cmd) { if (pmc_base) writel(1, pmc_base + VT8500_PMSR_REG); diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c index 9e4dd8b63c4a..b1eabaad50a5 100644 --- a/arch/arm/mach-w90x900/cpu.c +++ b/arch/arm/mach-w90x900/cpu.c @@ -230,9 +230,9 @@ void __init nuc900_init_clocks(void) #define WTE (1 << 7) #define WTRE (1 << 1) -void nuc9xx_restart(char mode, const char *cmd) +void nuc9xx_restart(enum reboot_mode mode, const char *cmd) { - if (mode == 's') { + if (mode == REBOOT_SOFT) { /* Jump into ROM at address 0 */ soft_restart(0); } else { diff --git a/arch/arm/mach-w90x900/nuc9xx.h b/arch/arm/mach-w90x900/nuc9xx.h index 
88ef4b267089..e3ab1e1381f1 100644 --- a/arch/arm/mach-w90x900/nuc9xx.h +++ b/arch/arm/mach-w90x900/nuc9xx.h @@ -14,10 +14,13 @@ * published by the Free Software Foundation. * */ + +#include <linux/reboot.h> + struct map_desc; /* core initialisation functions */ extern void nuc900_init_irq(void); extern void nuc900_timer_init(void); -extern void nuc9xx_restart(char, const char *); +extern void nuc9xx_restart(enum reboot_mode, const char *); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9e8101ecd63e..6cacdc8dd654 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -392,7 +392,8 @@ config CPU_V7 select CPU_CACHE_V7 select CPU_CACHE_VIPT select CPU_COPY_V6 if MMU - select CPU_CP15_MMU + select CPU_CP15_MMU if MMU + select CPU_CP15_MPU if !MMU select CPU_HAS_ASID if MMU select CPU_PABRT_V7 select CPU_TLB_V7 if MMU diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index ee558a01f390..ecfe6e53f6e0 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o obj-$(CONFIG_HIGHMEM) += highmem.o +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c465faca51b0..d70e0aba0c9d 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -523,6 +523,147 @@ static void aurora_flush_range(unsigned long start, unsigned long end) } } +/* + * For certain Broadcom SoCs, depending on the address range, different offsets + * need to be added to the address before passing it to L2 for + * invalidation/clean/flush + * + * Section Address Range Offset EMI + * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC + * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS + * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC + * + * When the start and end addresses have crossed two different sections, we + * need to break the L2 operation into two, each within its own section. + * For example, if we need to invalidate addresses starts at 0xBFFF0000 and + * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2) + * 0xC0000000 - 0xC0001000 + * + * Note 1: + * By breaking a single L2 operation into two, we may potentially suffer some + * performance hit, but keep in mind the cross section case is very rare + * + * Note 2: + * We do not need to handle the case when the start address is in + * Section 1 and the end address is in Section 3, since it is not a valid use + * case + * + * Note 3: + * Section 1 in practical terms can no longer be used on rev A2. Because of + * that the code does not need to handle section 1 at all. 
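/*
 * Aside: a stand-alone sketch of the splitting rule described above. The
 * arithmetic is assumed to be on 32-bit bus addresses (unsigned int here),
 * and the example range is the 0xBFFF0000 - 0xC0001000 case from the
 * comment: start sits in section 2, end in section 3, so the request is
 * issued as two L2 range operations with the appropriate offsets applied.
 */
#include <stdio.h>

#define BCM_SYS_EMI_START_ADDR     0x40000000u
#define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000u
#define BCM_SYS_EMI_OFFSET         0x40000000u
#define BCM_VC_EMI_OFFSET          0x80000000u

static unsigned int bcm_l2_addr(unsigned int addr)
{
        /* Section 2 (SYS EMI) gets +0x40000000, sections 1 and 3 +0x80000000 */
        if (addr >= BCM_SYS_EMI_START_ADDR && addr < BCM_VC_EMI_SEC3_START_ADDR)
                return addr + BCM_SYS_EMI_OFFSET;
        return addr + BCM_VC_EMI_OFFSET;
}

int main(void)
{
        unsigned int start = 0xBFFF0000u, end = 0xC0001000u;

        printf("op 1: %#x - %#x\n", bcm_l2_addr(start),
               bcm_l2_addr(BCM_VC_EMI_SEC3_START_ADDR - 1));
        printf("op 2: %#x - %#x\n", bcm_l2_addr(BCM_VC_EMI_SEC3_START_ADDR),
               bcm_l2_addr(end));
        return 0;
}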
+ * + */ +#define BCM_SYS_EMI_START_ADDR 0x40000000UL +#define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL + +#define BCM_SYS_EMI_OFFSET 0x40000000UL +#define BCM_VC_EMI_OFFSET 0x80000000UL + +static inline int bcm_addr_is_sys_emi(unsigned long addr) +{ + return (addr >= BCM_SYS_EMI_START_ADDR) && + (addr < BCM_VC_EMI_SEC3_START_ADDR); +} + +static inline unsigned long bcm_l2_phys_addr(unsigned long addr) +{ + if (bcm_addr_is_sys_emi(addr)) + return addr + BCM_SYS_EMI_OFFSET; + else + return addr + BCM_VC_EMI_OFFSET; +} + +static void bcm_inv_range(unsigned long start, unsigned long end) +{ + unsigned long new_start, new_end; + + BUG_ON(start < BCM_SYS_EMI_START_ADDR); + + if (unlikely(end <= start)) + return; + + new_start = bcm_l2_phys_addr(start); + new_end = bcm_l2_phys_addr(end); + + /* normal case, no cross section between start and end */ + if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { + l2x0_inv_range(new_start, new_end); + return; + } + + /* They cross sections, so it can only be a cross from section + * 2 to section 3 + */ + l2x0_inv_range(new_start, + bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); + l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), + new_end); +} + +static void bcm_clean_range(unsigned long start, unsigned long end) +{ + unsigned long new_start, new_end; + + BUG_ON(start < BCM_SYS_EMI_START_ADDR); + + if (unlikely(end <= start)) + return; + + if ((end - start) >= l2x0_size) { + l2x0_clean_all(); + return; + } + + new_start = bcm_l2_phys_addr(start); + new_end = bcm_l2_phys_addr(end); + + /* normal case, no cross section between start and end */ + if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { + l2x0_clean_range(new_start, new_end); + return; + } + + /* They cross sections, so it can only be a cross from section + * 2 to section 3 + */ + l2x0_clean_range(new_start, + bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); + l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), + new_end); +} + +static void bcm_flush_range(unsigned long start, unsigned long end) +{ + unsigned long new_start, new_end; + + BUG_ON(start < BCM_SYS_EMI_START_ADDR); + + if (unlikely(end <= start)) + return; + + if ((end - start) >= l2x0_size) { + l2x0_flush_all(); + return; + } + + new_start = bcm_l2_phys_addr(start); + new_end = bcm_l2_phys_addr(end); + + /* normal case, no cross section between start and end */ + if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { + l2x0_flush_range(new_start, new_end); + return; + } + + /* They cross sections, so it can only be a cross from section + * 2 to section 3 + */ + l2x0_flush_range(new_start, + bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); + l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), + new_end); +} + static void __init l2x0_of_setup(const struct device_node *np, u32 *aux_val, u32 *aux_mask) { @@ -765,6 +906,21 @@ static const struct l2x0_of_data aurora_no_outer_data = { }, }; +static const struct l2x0_of_data bcm_l2x0_data = { + .setup = pl310_of_setup, + .save = pl310_save, + .outer_cache = { + .resume = pl310_resume, + .inv_range = bcm_inv_range, + .clean_range = bcm_clean_range, + .flush_range = bcm_flush_range, + .sync = l2x0_cache_sync, + .flush_all = l2x0_flush_all, + .inv_all = l2x0_inv_all, + .disable = l2x0_disable, + }, +}; + static const struct of_device_id l2x0_ids[] __initconst = { { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, @@ -773,6 +929,8 @@ 
static const struct of_device_id l2x0_ids[] __initconst = { .data = (void *)&aurora_no_outer_data}, { .compatible = "marvell,aurora-outer-cache", .data = (void *)&aurora_with_outer_data}, + { .compatible = "bcm,bcm11351-a2-pl310-cache", + .data = (void *)&bcm_l2x0_data}, {} }; diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 2ac37372ef52..b55b1015724b 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -20,6 +20,7 @@ #include <asm/smp_plat.h> #include <asm/thread_notify.h> #include <asm/tlbflush.h> +#include <asm/proc-fns.h> /* * On ARMv6, we have the following structure in the Context ID: @@ -39,33 +40,51 @@ * non 64-bit operations. */ #define ASID_FIRST_VERSION (1ULL << ASID_BITS) -#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) - -#define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1) -#define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK) +#define NUM_USER_ASIDS ASID_FIRST_VERSION static DEFINE_RAW_SPINLOCK(cpu_asid_lock); static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); -DEFINE_PER_CPU(atomic64_t, active_asids); +static DEFINE_PER_CPU(atomic64_t, active_asids); static DEFINE_PER_CPU(u64, reserved_asids); static cpumask_t tlb_flush_pending; +#ifdef CONFIG_ARM_ERRATA_798181 +void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, + cpumask_t *mask) +{ + int cpu; + unsigned long flags; + u64 context_id, asid; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + context_id = mm->context.id.counter; + for_each_online_cpu(cpu) { + if (cpu == this_cpu) + continue; + /* + * We only need to send an IPI if the other CPUs are + * running the same ASID as the one being invalidated. + */ + asid = per_cpu(active_asids, cpu).counter; + if (asid == 0) + asid = per_cpu(reserved_asids, cpu); + if (context_id == asid) + cpumask_set_cpu(cpu, mask); + } + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); +} +#endif + #ifdef CONFIG_ARM_LPAE static void cpu_set_reserved_ttbr0(void) { - unsigned long ttbl = __pa(swapper_pg_dir); - unsigned long ttbh = 0; - /* * Set TTBR0 to swapper_pg_dir which contains only global entries. The * ASID is set to 0. */ - asm volatile( - " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" - : - : "r" (ttbl), "r" (ttbh)); + cpu_set_ttbr(0, __pa(swapper_pg_dir)); isb(); } #else @@ -128,7 +147,16 @@ static void flush_context(unsigned int cpu) asid = 0; } else { asid = atomic64_xchg(&per_cpu(active_asids, i), 0); - __set_bit(ASID_TO_IDX(asid), asid_map); + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * ASID, as this is the only trace we have of + * the process it is still running. + */ + if (asid == 0) + asid = per_cpu(reserved_asids, i); + __set_bit(asid & ~ASID_MASK, asid_map); } per_cpu(reserved_asids, i) = asid; } @@ -167,17 +195,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) /* * Allocate a free ASID. If we can't find one, take a * note of the currently active ASIDs and mark the TLBs - * as requiring flushes. + * as requiring flushes. We always count from ASID #1, + * as we reserve ASID #0 to switch via TTBR0 and indicate + * rollover events. 
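/*
 * Aside: a toy, user-space model of the allocation policy described above --
 * ASID #0 stays reserved, numbers are handed out starting at 1, and running
 * out of ASIDs bumps the generation and clears the map. The real code also
 * preserves per-CPU reserved ASIDs and schedules TLB flushes on rollover;
 * ASID_BITS = 8 below is just an illustrative width.
 */
#include <stdio.h>

#define ASID_BITS       8
#define NUM_USER_ASIDS  (1u << ASID_BITS)

static unsigned long long generation = NUM_USER_ASIDS; /* ASID_FIRST_VERSION */
static unsigned char asid_map[NUM_USER_ASIDS];

static unsigned long long new_context(void)
{
        unsigned int asid;

        /* Always search from 1: ASID 0 marks "rollover, switch via TTBR0" */
        for (asid = 1; asid < NUM_USER_ASIDS; asid++)
                if (!asid_map[asid])
                        break;

        if (asid == NUM_USER_ASIDS) {                   /* rollover */
                generation += NUM_USER_ASIDS;
                for (asid = 1; asid < NUM_USER_ASIDS; asid++)
                        asid_map[asid] = 0;
                asid = 1;
        }

        asid_map[asid] = 1;
        return generation | asid;                       /* asid |= generation */
}

int main(void)
{
        printf("first context id: %#llx\n", new_context());
        return 0;
}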
*/ - asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); if (asid == NUM_USER_ASIDS) { generation = atomic64_add_return(ASID_FIRST_VERSION, &asid_generation); flush_context(cpu); - asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); } __set_bit(asid, asid_map); - asid = generation | IDX_TO_ASID(asid); + asid |= generation; cpumask_clear(mm_cpumask(mm)); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ef3e0f3aac96..7f9b1798c6cf 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -250,7 +250,7 @@ static void __dma_free_buffer(struct page *page, size_t size) #ifdef CONFIG_MMU #ifdef CONFIG_HUGETLB_PAGE -#error ARM Coherent DMA allocator does not (yet) support huge TLB +#warning ARM Coherent DMA allocator does not (yet) support huge TLB #endif static void *__alloc_from_contiguous(struct device *dev, size_t size, @@ -880,10 +880,24 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); /* - * Mark the D-cache clean for this page to avoid extra flushing. + * Mark the D-cache clean for these pages to avoid extra flushing. */ - if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) - set_bit(PG_dcache_clean, &page->flags); + if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { + unsigned long pfn; + size_t left = size; + + pfn = page_to_pfn(page) + off / PAGE_SIZE; + off %= PAGE_SIZE; + if (off) { + pfn++; + left -= PAGE_SIZE - off; + } + while (left >= PAGE_SIZE) { + page = pfn_to_page(pfn++); + set_bit(PG_dcache_clean, &page->flags); + left -= PAGE_SIZE; + } + } } /** @@ -1314,6 +1328,15 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, if (gfp & GFP_ATOMIC) return __iommu_alloc_atomic(dev, size, handle); + /* + * Following is a work-around (a.k.a. hack) to prevent pages + * with __GFP_COMP being passed to split_page() which cannot + * handle them. The real problem is that this flag probably + * should be 0 on ARM as it is not supported on this + * platform; see CONFIG_HUGETLBFS. 
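/*
 * Aside: a stand-alone rendering of the __dma_page_dev_to_cpu() change
 * earlier in this file's diff -- only pages that the DMA range covers in
 * full get PG_dcache_clean set; a partially covered first or last page is
 * skipped. PAGE_SIZE and the pfn/offset values below are illustrative.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void mark_clean(unsigned long pfn, unsigned long off, unsigned long size)
{
        unsigned long left = size;

        pfn += off / PAGE_SIZE;
        off %= PAGE_SIZE;
        if (off) {                      /* first page only partly covered */
                pfn++;
                left -= PAGE_SIZE - off;
        }
        while (left >= PAGE_SIZE) {     /* whole pages only */
                printf("mark pfn %lu clean\n", pfn++);
                left -= PAGE_SIZE;
        }
}

int main(void)
{
        /* 3 pages' worth starting 512 bytes into pfn 100: marks 101 and 102 */
        mark_clean(100, 512, 3 * PAGE_SIZE);
        return 0;
}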
+ */ + gfp &= ~(__GFP_COMP); + pages = __iommu_alloc_buffer(dev, size, gfp, attrs); if (!pages) return NULL; @@ -1372,16 +1395,17 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) { - struct page **pages = __iommu_get_pages(cpu_addr, attrs); + struct page **pages; size = PAGE_ALIGN(size); - if (!pages) { - WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); + if (__in_atomic_pool(cpu_addr, size)) { + __iommu_free_atomic(dev, cpu_addr, handle, size); return; } - if (__in_atomic_pool(cpu_addr, size)) { - __iommu_free_atomic(dev, cpu_addr, handle, size); + pages = __iommu_get_pages(cpu_addr, attrs); + if (!pages) { + WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); return; } @@ -1636,13 +1660,27 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p { struct dma_iommu_mapping *mapping = dev->archdata.mapping; dma_addr_t dma_addr; - int ret, len = PAGE_ALIGN(size + offset); + int ret, prot, len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); if (dma_addr == DMA_ERROR_CODE) return dma_addr; - ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0); + switch (dir) { + case DMA_BIDIRECTIONAL: + prot = IOMMU_READ | IOMMU_WRITE; + break; + case DMA_TO_DEVICE: + prot = IOMMU_READ; + break; + case DMA_FROM_DEVICE: + prot = IOMMU_WRITE; + break; + default: + prot = 0; + } + + ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); if (ret < 0) goto fail; @@ -1907,7 +1945,7 @@ void arm_iommu_detach_device(struct device *dev) iommu_detach_device(mapping->domain, dev); kref_put(&mapping->kref, release_iommu_mapping); - mapping = NULL; + dev->archdata.mapping = NULL; set_dma_ops(dev, NULL); pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 5dbf13f954f6..c97f7940cb95 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -491,12 +491,14 @@ do_translation_fault(unsigned long addr, unsigned int fsr, * Some section permission faults need to be handled gracefully. * They can happen due to a __{get,put}_user during an oops. */ +#ifndef CONFIG_ARM_LPAE static int do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { do_bad_area(addr, fsr, regs); return 0; } +#endif /* CONFIG_ARM_LPAE */ /* * This abort handler always returns "fault". diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 32aa5861119f..6d5ba9afb16a 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -17,6 +17,7 @@ #include <asm/highmem.h> #include <asm/smp_plat.h> #include <asm/tlbflush.h> +#include <linux/hugetlb.h> #include "mm.h" @@ -168,19 +169,23 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) * coherent with the kernels mapping. 
*/ if (!PageHighMem(page)) { - __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); + size_t page_size = PAGE_SIZE << compound_order(page); + __cpuc_flush_dcache_area(page_address(page), page_size); } else { - void *addr; - + unsigned long i; if (cache_is_vipt_nonaliasing()) { - addr = kmap_atomic(page); - __cpuc_flush_dcache_area(addr, PAGE_SIZE); - kunmap_atomic(addr); - } else { - addr = kmap_high_get(page); - if (addr) { + for (i = 0; i < (1 << compound_order(page)); i++) { + void *addr = kmap_atomic(page); __cpuc_flush_dcache_area(addr, PAGE_SIZE); - kunmap_high(page); + kunmap_atomic(addr); + } + } else { + for (i = 0; i < (1 << compound_order(page)); i++) { + void *addr = kmap_high_get(page); + if (addr) { + __cpuc_flush_dcache_area(addr, PAGE_SIZE); + kunmap_high(page); + } } } } @@ -287,7 +292,7 @@ void flush_dcache_page(struct page *page) mapping = page_mapping(page); if (!cache_ops_need_broadcast() && - mapping && !mapping_mapped(mapping)) + mapping && !page_mapped(page)) clear_bit(PG_dcache_clean, &page->flags); else { __flush_dcache_page(mapping, page); diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c index 05a4e9431836..ab4409a2307e 100644 --- a/arch/arm/mm/fsr-3level.c +++ b/arch/arm/mm/fsr-3level.c @@ -9,11 +9,11 @@ static struct fsr_info fsr_info[] = { { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, { do_bad, SIGBUS, 0, "reserved access flag fault" }, { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, { do_bad, SIGBUS, 0, "reserved permission fault" }, { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, - { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, { do_bad, SIGBUS, 0, "synchronous external abort" }, { do_bad, SIGBUS, 0, "asynchronous external abort" }, diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c new file mode 100644 index 000000000000..3d1e4a205b0b --- /dev/null +++ b/arch/arm/mm/hugetlbpage.c @@ -0,0 +1,101 @@ +/* + * arch/arm/mm/hugetlbpage.c + * + * Copyright (C) 2012 ARM Ltd. + * + * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/pagemap.h> +#include <linux/err.h> +#include <linux/sysctl.h> +#include <asm/mman.h> +#include <asm/tlb.h> +#include <asm/tlbflush.h> +#include <asm/pgalloc.h> + +/* + * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot + * of type casting from pmd_t * to pte_t *. 
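/*
 * Aside: the sub-page arithmetic that follow_huge_pmd() further down relies
 * on, shown in isolation -- the offset of the address inside its PMD section,
 * divided by the page size, selects the struct page within the huge page.
 * The 2 MiB section and 4 KiB page sizes below are used only for
 * illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PMD_MASK   (~((1UL << PMD_SHIFT) - 1))

int main(void)
{
        unsigned long address = 0x40123456UL;   /* arbitrary example address */
        unsigned long idx = (address & ~PMD_MASK) >> PAGE_SHIFT;

        printf("sub-page index inside the huge page: %lu\n", idx); /* 291 */
        return 0;
}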
+ */ + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + pud = pud_offset(pgd, addr); + if (pud_present(*pud)) + pmd = pmd_offset(pud, addr); + } + + return (pte_t *)pmd; +} + +struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, + int write) +{ + return ERR_PTR(-EINVAL); +} + +int pud_huge(pud_t pud) +{ + return 0; +} + +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +{ + return 0; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + pud = pud_alloc(mm, pgd, addr); + if (pud) + pte = (pte_t *)pmd_alloc(mm, pud, addr); + + return pte; +} + +struct page * +follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write) +{ + struct page *page; + + page = pte_page(*(pte_t *)pmd); + if (page) + page += ((address & ~PMD_MASK) >> PAGE_SHIFT); + return page; +} + +int pmd_huge(pmd_t pmd) +{ + return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); +} diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 9a5cdc01fcdf..6833cbead6cc 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -36,12 +36,13 @@ #include "mm.h" -static unsigned long phys_initrd_start __initdata = 0; +static phys_addr_t phys_initrd_start __initdata = 0; static unsigned long phys_initrd_size __initdata = 0; static int __init early_initrd(char *p) { - unsigned long start, size; + phys_addr_t start; + unsigned long size; char *endp; start = memparse(p, &endp); @@ -350,14 +351,14 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) #ifdef CONFIG_BLK_DEV_INITRD if (phys_initrd_size && !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { - pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", - phys_initrd_start, phys_initrd_size); + pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", + (u64)phys_initrd_start, phys_initrd_size); phys_initrd_start = phys_initrd_size = 0; } if (phys_initrd_size && memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { - pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", - phys_initrd_start, phys_initrd_size); + pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", + (u64)phys_initrd_start, phys_initrd_size); phys_initrd_start = phys_initrd_size = 0; } if (phys_initrd_size) { @@ -442,7 +443,7 @@ static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { struct page *start_pg, *end_pg; - unsigned long pg, pgend; + phys_addr_t pg, pgend; /* * Convert start_pfn/end_pfn to a struct page pointer. @@ -454,8 +455,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) * Convert to physical addresses, and * round start upwards and end downwards. 
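/*
 * Aside: the rounding described above, in isolation -- the start of the hole
 * is page-aligned upwards and the end downwards, so only memmap covering
 * whole pages is freed. The 4 KiB PAGE_SIZE and the addresses are examples.
 */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long pg    = PAGE_ALIGN(0x80001234UL); /* up to 0x80002000   */
        unsigned long pgend = 0x80009876UL & PAGE_MASK; /* down to 0x80009000 */

        printf("free memmap from %#lx to %#lx\n", pg, pgend);
        return 0;
}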
*/ - pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); - pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; + pg = PAGE_ALIGN(__pa(start_pg)); + pgend = __pa(end_pg) & PAGE_MASK; /* * If there are free pages between these, @@ -582,9 +583,6 @@ static void __init free_highpages(void) */ void __init mem_init(void) { - unsigned long reserved_pages, free_pages; - struct memblock_region *reg; - int i; #ifdef CONFIG_HAVE_TCM /* These pointers are filled in on TCM detection */ extern u32 dtcm_end; @@ -595,57 +593,16 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ free_unused_memmap(&meminfo); - - totalram_pages += free_all_bootmem(); + free_all_bootmem(); #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL); + free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, -1, NULL); #endif free_highpages(); - reserved_pages = free_pages = 0; - - for_each_bank(i, &meminfo) { - struct membank *bank = &meminfo.bank[i]; - unsigned int pfn1, pfn2; - struct page *page, *end; - - pfn1 = bank_pfn_start(bank); - pfn2 = bank_pfn_end(bank); - - page = pfn_to_page(pfn1); - end = pfn_to_page(pfn2 - 1) + 1; - - do { - if (PageReserved(page)) - reserved_pages++; - else if (!page_count(page)) - free_pages++; - page++; - } while (page < end); - } - - /* - * Since our memory may not be contiguous, calculate the - * real number of pages we have in this system - */ - printk(KERN_INFO "Memory:"); - num_physpages = 0; - for_each_memblock(memory, reg) { - unsigned long pages = memblock_region_memory_end_pfn(reg) - - memblock_region_memory_base_pfn(reg); - num_physpages += pages; - printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); - } - printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); - - printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", - nr_free_pages() << (PAGE_SHIFT-10), - free_pages << (PAGE_SHIFT-10), - reserved_pages << (PAGE_SHIFT-10), - totalhigh_pages << (PAGE_SHIFT-10)); + mem_init_print_info(NULL); #define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLM(b, t) b, t, ((t) - (b)) >> 20 @@ -711,7 +668,7 @@ void __init mem_init(void) BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); #endif - if (PAGE_SIZE >= 16384 && num_physpages <= 128) { + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /* * On a machine this small we won't get @@ -728,12 +685,12 @@ void free_initmem(void) extern char __tcm_start, __tcm_end; poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); - free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link"); + free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); #endif poison_init_mem(__init_begin, __init_end - __init_begin); if (!machine_is_integrator() && !machine_is_cintegrator()) - free_initmem_default(0); + free_initmem_default(-1); } #ifdef CONFIG_BLK_DEV_INITRD @@ -744,7 +701,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) { poison_init_mem((void *)start, PAGE_ALIGN(end) - start); - free_reserved_area(start, end, 0, "initrd"); + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } } diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 04d9006eab1f..f123d6eb074b 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -331,10 +331,10 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, return (void __iomem *) (offset + addr); } -void __iomem 
*__arm_ioremap_caller(unsigned long phys_addr, size_t size, +void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, unsigned int mtype, void *caller) { - unsigned long last_addr; + phys_addr_t last_addr; unsigned long offset = phys_addr & ~PAGE_MASK; unsigned long pfn = __phys_to_pfn(phys_addr); @@ -367,12 +367,12 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, } EXPORT_SYMBOL(__arm_ioremap_pfn); -void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, +void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *) = __arm_ioremap_caller; void __iomem * -__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) +__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype) { return arch_ioremap_caller(phys_addr, size, mtype, __builtin_return_address(0)); @@ -387,7 +387,7 @@ EXPORT_SYMBOL(__arm_ioremap); * CONFIG_GENERIC_ALLOCATOR for allocating external memory. */ void __iomem * -__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) +__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) { unsigned int mtype; diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 10062ceadd1c..0c6356255fe3 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index d1d1cefa1f93..d7229d28c7f8 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -675,7 +675,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, - unsigned long end, unsigned long phys, const struct mem_type *type) + unsigned long end, phys_addr_t phys, + const struct mem_type *type) { pud_t *pud = pud_offset(pgd, addr); unsigned long next; @@ -989,27 +990,28 @@ phys_addr_t arm_lowmem_limit __initdata = 0; void __init sanity_check_meminfo(void) { int i, j, highmem = 0; + phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; for (i = 0, j = 0; i < meminfo.nr_banks; i++) { struct membank *bank = &meminfo.bank[j]; - *bank = meminfo.bank[i]; + phys_addr_t size_limit; - if (bank->start > ULONG_MAX) - highmem = 1; + *bank = meminfo.bank[i]; + size_limit = bank->size; -#ifdef CONFIG_HIGHMEM - if (__va(bank->start) >= vmalloc_min || - __va(bank->start) < (void *)PAGE_OFFSET) + if (bank->start >= vmalloc_limit) highmem = 1; + else + size_limit = vmalloc_limit - bank->start; bank->highmem = highmem; +#ifdef CONFIG_HIGHMEM /* * Split those memory banks which are partially overlapping * the vmalloc area greatly simplifying things later. 
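/*
 * Aside: a toy version of the bank split performed above -- a bank that
 * straddles vmalloc_limit is cut into a lowmem part (start, size_limit) and
 * a highmem part (vmalloc_limit, remainder). All values are made up.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long vmalloc_limit = 0x30000000ULL;       /* example */
        unsigned long long start = 0x20000000ULL, size = 0x20000000ULL;

        if (start < vmalloc_limit && size > vmalloc_limit - start) {
                unsigned long long size_limit = vmalloc_limit - start;

                printf("lowmem  bank: %#llx + %#llx\n", start, size_limit);
                printf("highmem bank: %#llx + %#llx\n",
                       vmalloc_limit, size - size_limit);
        }
        return 0;
}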
*/ - if (!highmem && __va(bank->start) < vmalloc_min && - bank->size > vmalloc_min - __va(bank->start)) { + if (!highmem && bank->size > size_limit) { if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " "ignoring high memory\n"); @@ -1018,16 +1020,14 @@ void __init sanity_check_meminfo(void) (meminfo.nr_banks - i) * sizeof(*bank)); meminfo.nr_banks++; i++; - bank[1].size -= vmalloc_min - __va(bank->start); - bank[1].start = __pa(vmalloc_min - 1) + 1; + bank[1].size -= size_limit; + bank[1].start = vmalloc_limit; bank[1].highmem = highmem = 1; j++; } - bank->size = vmalloc_min - __va(bank->start); + bank->size = size_limit; } #else - bank->highmem = highmem; - /* * Highmem banks not allowed with !CONFIG_HIGHMEM. */ @@ -1040,31 +1040,16 @@ void __init sanity_check_meminfo(void) } /* - * Check whether this memory bank would entirely overlap - * the vmalloc area. - */ - if (__va(bank->start) >= vmalloc_min || - __va(bank->start) < (void *)PAGE_OFFSET) { - printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " - "(vmalloc region overlap).\n", - (unsigned long long)bank->start, - (unsigned long long)bank->start + bank->size - 1); - continue; - } - - /* * Check whether this memory bank would partially overlap * the vmalloc area. */ - if (__va(bank->start + bank->size - 1) >= vmalloc_min || - __va(bank->start + bank->size - 1) <= __va(bank->start)) { - unsigned long newsize = vmalloc_min - __va(bank->start); + if (bank->size > size_limit) { printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " "to -%.8llx (vmalloc region overlap).\n", (unsigned long long)bank->start, (unsigned long long)bank->start + bank->size - 1, - (unsigned long long)bank->start + newsize - 1); - bank->size = newsize; + (unsigned long long)bank->start + size_limit - 1); + bank->size = size_limit; } #endif if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 5a3aba614a40..1fa50100ab6a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -8,6 +8,7 @@ #include <linux/pagemap.h> #include <linux/io.h> #include <linux/memblock.h> +#include <linux/kernel.h> #include <asm/cacheflush.h> #include <asm/sections.h> @@ -15,9 +16,260 @@ #include <asm/setup.h> #include <asm/traps.h> #include <asm/mach/arch.h> +#include <asm/cputype.h> +#include <asm/mpu.h> #include "mm.h" +#ifdef CONFIG_ARM_MPU +struct mpu_rgn_info mpu_rgn_info; + +/* Region number */ +static void rgnr_write(u32 v) +{ + asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v)); +} + +/* Data-side / unified region attributes */ + +/* Region access control register */ +static void dracr_write(u32 v) +{ + asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v)); +} + +/* Region size register */ +static void drsr_write(u32 v) +{ + asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v)); +} + +/* Region base address register */ +static void drbar_write(u32 v) +{ + asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v)); +} + +static u32 drbar_read(void) +{ + u32 v; + asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v)); + return v; +} +/* Optional instruction-side region attributes */ + +/* I-side Region access control register */ +static void iracr_write(u32 v) +{ + asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v)); +} + +/* I-side Region size register */ +static void irsr_write(u32 v) +{ + asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v)); +} + +/* I-side Region base address register */ +static void irbar_write(u32 v) +{ + asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v)); +} + +static unsigned long irbar_read(void) +{ + unsigned long v; + 
asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v)); + return v; +} + +/* MPU initialisation functions */ +void __init sanity_check_meminfo_mpu(void) +{ + int i; + struct membank *bank = meminfo.bank; + phys_addr_t phys_offset = PHYS_OFFSET; + phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; + + /* Initially only use memory continuous from PHYS_OFFSET */ + if (bank_phys_start(&bank[0]) != phys_offset) + panic("First memory bank must be contiguous from PHYS_OFFSET"); + + /* Banks have already been sorted by start address */ + for (i = 1; i < meminfo.nr_banks; i++) { + if (bank[i].start <= bank_phys_end(&bank[0]) && + bank_phys_end(&bank[i]) > bank_phys_end(&bank[0])) { + bank[0].size = bank_phys_end(&bank[i]) - bank[0].start; + } else { + pr_notice("Ignoring RAM after 0x%.8lx. " + "First non-contiguous (ignored) bank start: 0x%.8lx\n", + (unsigned long)bank_phys_end(&bank[0]), + (unsigned long)bank_phys_start(&bank[i])); + break; + } + } + /* All contiguous banks are now merged in to the first bank */ + meminfo.nr_banks = 1; + specified_mem_size = bank[0].size; + + /* + * MPU has curious alignment requirements: Size must be power of 2, and + * region start must be aligned to the region size + */ + if (phys_offset != 0) + pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n"); + + /* + * Maximum aligned region might overflow phys_addr_t if phys_offset is + * 0. Hence we keep everything below 4G until we take the smaller of + * the aligned_region_size and rounded_mem_size, one of which is + * guaranteed to be smaller than the maximum physical address. + */ + aligned_region_size = (phys_offset - 1) ^ (phys_offset); + /* Find the max power-of-two sized region that fits inside our bank */ + rounded_mem_size = (1 << __fls(bank[0].size)) - 1; + + /* The actual region size is the smaller of the two */ + aligned_region_size = aligned_region_size < rounded_mem_size + ? aligned_region_size + 1 + : rounded_mem_size + 1; + + if (aligned_region_size != specified_mem_size) + pr_warn("Truncating memory from 0x%.8lx to 0x%.8lx (MPU region constraints)", + (unsigned long)specified_mem_size, + (unsigned long)aligned_region_size); + + meminfo.bank[0].size = aligned_region_size; + pr_debug("MPU Region from 0x%.8lx size 0x%.8lx (end 0x%.8lx))\n", + (unsigned long)phys_offset, + (unsigned long)aligned_region_size, + (unsigned long)bank_phys_end(&bank[0])); + +} + +static int mpu_present(void) +{ + return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7); +} + +static int mpu_max_regions(void) +{ + /* + * We don't support a different number of I/D side regions so if we + * have separate instruction and data memory maps then return + * whichever side has a smaller number of supported regions. 
+ */ + u32 dregions, iregions, mpuir; + mpuir = read_cpuid(CPUID_MPUIR); + + dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; + + /* Check for separate d-side and i-side memory maps */ + if (mpuir & MPUIR_nU) + iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION; + + /* Use the smallest of the two maxima */ + return min(dregions, iregions); +} + +static int mpu_iside_independent(void) +{ + /* MPUIR.nU specifies whether there is *not* a unified memory map */ + return read_cpuid(CPUID_MPUIR) & MPUIR_nU; +} + +static int mpu_min_region_order(void) +{ + u32 drbar_result, irbar_result; + /* We've kept a region free for this probing */ + rgnr_write(MPU_PROBE_REGION); + isb(); + /* + * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum + * region order + */ + drbar_write(0xFFFFFFFC); + drbar_result = irbar_result = drbar_read(); + drbar_write(0x0); + /* If the MPU is non-unified, we use the larger of the two minima*/ + if (mpu_iside_independent()) { + irbar_write(0xFFFFFFFC); + irbar_result = irbar_read(); + irbar_write(0x0); + } + isb(); /* Ensure that MPU region operations have completed */ + /* Return whichever result is larger */ + return __ffs(max(drbar_result, irbar_result)); +} + +static int mpu_setup_region(unsigned int number, phys_addr_t start, + unsigned int size_order, unsigned int properties) +{ + u32 size_data; + + /* We kept a region free for probing resolution of MPU regions*/ + if (number > mpu_max_regions() || number == MPU_PROBE_REGION) + return -ENOENT; + + if (size_order > 32) + return -ENOMEM; + + if (size_order < mpu_min_region_order()) + return -ENOMEM; + + /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */ + size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; + + dsb(); /* Ensure all previous data accesses occur with old mappings */ + rgnr_write(number); + isb(); + drbar_write(start); + dracr_write(properties); + isb(); /* Propagate properties before enabling region */ + drsr_write(size_data); + + /* Check for independent I-side registers */ + if (mpu_iside_independent()) { + irbar_write(start); + iracr_write(properties); + isb(); + irsr_write(size_data); + } + isb(); + + /* Store region info (we treat i/d side the same, so only store d) */ + mpu_rgn_info.rgns[number].dracr = properties; + mpu_rgn_info.rgns[number].drbar = start; + mpu_rgn_info.rgns[number].drsr = size_data; + return 0; +} + +/* +* Set up default MPU regions, doing nothing if there is no MPU +*/ +void __init mpu_setup(void) +{ + int region_err; + if (!mpu_present()) + return; + + region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET, + ilog2(meminfo.bank[0].size), + MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL); + if (region_err) { + panic("MPU region initialization failure! %d", region_err); + } else { + pr_info("Using ARMv7 PMSA Compliant MPU. " + "Region independence: %s, Max regions: %d\n", + mpu_iside_independent() ? 
"Yes" : "No", + mpu_max_regions()); + } +} +#else +static void sanity_check_meminfo_mpu(void) {} +static void __init mpu_setup(void) {} +#endif /* CONFIG_ARM_MPU */ + void __init arm_mm_memblock_reserve(void) { #ifndef CONFIG_CPU_V7M @@ -37,7 +289,9 @@ void __init arm_mm_memblock_reserve(void) void __init sanity_check_meminfo(void) { - phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); + phys_addr_t end; + sanity_check_meminfo_mpu(); + end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); high_memory = __va(end - 1) + 1; } @@ -48,6 +302,7 @@ void __init sanity_check_meminfo(void) void __init paging_init(struct machine_desc *mdesc) { early_trap_init((void *)CONFIG_VECTORS_BASE); + mpu_setup(); bootmem_init(); } @@ -94,16 +349,16 @@ void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, return __arm_ioremap_pfn(pfn, offset, size, mtype); } -void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, +void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype) { return (void __iomem *)phys_addr; } EXPORT_SYMBOL(__arm_ioremap); -void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *); +void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); -void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, +void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, unsigned int mtype, void *caller) { return __arm_ioremap(phys_addr, size, mtype); diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 919405e20b80..2d1ef87328a1 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -140,8 +140,10 @@ ENTRY(cpu_v6_set_pte_ext) ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r9, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID +#ifdef CONFIG_MMU mrc p15, 0, r5, c3, c0, 0 @ Domain ID mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 +#endif mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register mrc p15, 0, r8, c1, c0, 2 @ co-processor access control mrc p15, 0, r9, c1, c0, 0 @ control register @@ -158,14 +160,16 @@ ENTRY(cpu_v6_do_resume) mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID ldmia r0, {r4 - r9} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID +#ifdef CONFIG_MMU mcr p15, 0, r5, c3, c0, 0 @ Domain ID ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) ALT_UP(orr r1, r1, #TTB_FLAGS_UP) mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 + mcr p15, 0, ip, c2, c0, 2 @ TTB control register +#endif mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register mcr p15, 0, r8, c1, c0, 2 @ co-processor access control - mcr p15, 0, ip, c2, c0, 2 @ TTB control register mcr p15, 0, ip, c7, c5, 4 @ ISB mov r0, r9 @ control register b cpu_resume_mmu diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 363027e811d6..5ffe1956c6d9 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -39,6 +39,14 @@ #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) +#ifndef __ARMEB__ +# define rpgdl r0 +# define rpgdh r1 +#else +# define rpgdl r1 +# define rpgdh r0 +#endif + /* * cpu_v7_switch_mm(pgd_phys, tsk) * @@ -47,10 +55,10 @@ */ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_MMU - mmid r1, r1 @ get mm->context.id - asid r3, r1 - mov r3, r3, lsl #(48 - 32) @ ASID - mcrr p15, 0, r0, r3, c2 @ set TTB 0 + mmid r2, r2 + asid r2, r2 + orr rpgdh, rpgdh, r2, lsl #(48 - 32) @ upper 32-bits of pgd + mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0 
isb #endif mov pc, lr @@ -106,7 +114,8 @@ ENDPROC(cpu_v7_set_pte_ext) */ .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address - cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below) + mov \tmp, \tmp, lsr #ARCH_PGD_SHIFT + cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register orr \tmp, \tmp, #TTB_EAE ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) @@ -114,27 +123,21 @@ ENDPROC(cpu_v7_set_pte_ext) ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) /* - * TTBR0/TTBR1 split (PAGE_OFFSET): - * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) - * 0x80000000: T0SZ = 0, T1SZ = 1 - * 0xc0000000: T0SZ = 0, T1SZ = 2 - * - * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise - * booting secondary CPUs would end up using TTBR1 for the identity - * mapping set up in TTBR0. + * Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above), + * otherwise booting secondary CPUs would end up using TTBR1 for the + * identity mapping set up in TTBR0. */ - bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET? - orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ -#if defined CONFIG_VMSPLIT_2G - /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */ - add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries -#elif defined CONFIG_VMSPLIT_3G - /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */ - add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd -#endif - /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */ -9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register - mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 + orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ + mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR + mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits + mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits + addls \ttbr1, \ttbr1, #TTBR1_OFFSET + mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 + mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits + mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits + mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 + mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 + mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 .endm __CPUINIT diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index e35fec34453e..7ef3ad05df39 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -98,9 +98,11 @@ ENTRY(cpu_v7_do_suspend) mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID stmia r0!, {r4 - r5} +#ifdef CONFIG_MMU mrc p15, 0, r6, c3, c0, 0 @ Domain ID mrc p15, 0, r7, c2, c0, 1 @ TTB 1 mrc p15, 0, r11, c2, c0, 2 @ TTB control register +#endif mrc p15, 0, r8, c1, c0, 0 @ Control register mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control @@ -110,13 +112,14 @@ ENDPROC(cpu_v7_do_suspend) ENTRY(cpu_v7_do_resume) mov ip, #0 - mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID ldmia r0!, {r4 - r5} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID ldmia r0, {r6 - r11} +#ifdef CONFIG_MMU + mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs mcr p15, 0, r6, c3, c0, 0 @ Domain ID #ifndef CONFIG_ARM_LPAE ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) @@ -125,14 +128,15 @@ ENTRY(cpu_v7_do_resume) mcr p15, 0, r1, c2, c0, 0 @ TTB 0 mcr p15, 0, r7, c2, c0, 1 @ TTB 1 mcr p15, 0, r11, c2, c0, 2 @ TTB control register - mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register - teq 
r4, r9 @ Is it already set? - mcrne p15, 0, r9, c1, c0, 1 @ No, so write it - mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control ldr r4, =PRRR @ PRRR ldr r5, =NMRR @ NMRR mcr p15, 0, r4, c10, c2, 0 @ write PRRR mcr p15, 0, r5, c10, c2, 1 @ write NMRR +#endif /* CONFIG_MMU */ + mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register + teq r4, r9 @ Is it already set? + mcrne p15, 0, r9, c1, c0, 1 @ No, so write it + mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control isb dsb mov r0, r8 @ control register @@ -178,7 +182,8 @@ ENDPROC(cpu_pj4b_do_idle) */ __v7_ca5mp_setup: __v7_ca9mp_setup: - mov r10, #(1 << 0) @ TLB ops broadcasting +__v7_cr7mp_setup: + mov r10, #(1 << 0) @ Cache/TLB ops broadcasting b 1f __v7_ca7mp_setup: __v7_ca15mp_setup: @@ -443,6 +448,16 @@ __v7_pj4b_proc_info: #endif /* + * ARM Ltd. Cortex R7 processor. + */ + .type __v7_cr7mp_proc_info, #object +__v7_cr7mp_proc_info: + .long 0x410fc170 + .long 0xff0ffff0 + __v7_proc __v7_cr7mp_setup + .size __v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info + + /* * ARM Ltd. Cortex A7 processor. */ .type __v7_ca7mp_proc_info, #object diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 1a643ee8e082..f50d223a0bd3 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -900,8 +900,7 @@ void bpf_jit_compile(struct sk_filter *fp) #endif alloc_size = 4 * ctx.idx; - ctx.target = module_alloc(max(sizeof(struct work_struct), - alloc_size)); + ctx.target = module_alloc(alloc_size); if (unlikely(ctx.target == NULL)) goto out; @@ -927,19 +926,8 @@ out: return; } -static void bpf_jit_free_worker(struct work_struct *work) -{ - module_free(NULL, work); -} - void bpf_jit_free(struct sk_filter *fp) { - struct work_struct *work; - - if (fp->bpf_func != sk_run_filter) { - work = (struct work_struct *)fp->bpf_func; - - INIT_WORK(work, bpf_jit_free_worker); - schedule_work(work); - } + if (fp->bpf_func != sk_run_filter) + module_free(NULL, fp->bpf_func); } diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index 1ff6a37e893c..a4d1f8de3b5b 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c @@ -192,12 +192,10 @@ static int __init iop3xx_adma_cap_init(void) #ifdef CONFIG_ARCH_IOP32X /* the 32x AAU does not perform zero sum */ dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); - dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); #else dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask); - dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask); #endif diff --git a/arch/arm/plat-iop/gpio.c b/arch/arm/plat-iop/gpio.c index e4de9be78feb..697de6dc4936 100644 --- a/arch/arm/plat-iop/gpio.c +++ b/arch/arm/plat-iop/gpio.c @@ -17,6 +17,7 @@ #include <linux/gpio.h> #include <linux/export.h> #include <asm/hardware/iop3xx.h> +#include <mach/gpio.h> void gpio_line_config(int line, int direction) { diff --git a/arch/arm/plat-iop/restart.c b/arch/arm/plat-iop/restart.c index 33fa699a4d28..3a4d5e5fde52 100644 --- a/arch/arm/plat-iop/restart.c +++ b/arch/arm/plat-iop/restart.c @@ -11,7 +11,7 @@ #include <asm/system_misc.h> #include <mach/hardware.h> -void iop3xx_restart(char mode, const char *cmd) +void iop3xx_restart(enum reboot_mode mode, const char *cmd) { *IOP3XX_PCSR = 0x30; diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c index 837a2d52e9db..29606bd75f3f 100644 --- a/arch/arm/plat-iop/time.c +++ b/arch/arm/plat-iop/time.c @@ -22,9 
+22,9 @@ #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/export.h> +#include <linux/sched_clock.h> #include <mach/hardware.h> #include <asm/irq.h> -#include <asm/sched_clock.h> #include <asm/uaccess.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c index 5b0b86bb34bb..d9bc98eb2a6b 100644 --- a/arch/arm/plat-omap/counter_32k.c +++ b/arch/arm/plat-omap/counter_32k.c @@ -18,9 +18,9 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/clocksource.h> +#include <linux/sched_clock.h> #include <asm/mach/time.h> -#include <asm/sched_clock.h> #include <plat/counter-32k.h> diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index c019b7aaf776..c66d163d7a2a 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -666,14 +666,9 @@ void __init orion_xor0_init(unsigned long mapbase_low, orion_xor0_shared_resources[3].start = irq_1; orion_xor0_shared_resources[3].end = irq_1; - /* - * two engines can't do memset simultaneously, this limitation - * satisfied by removing memset support from one of the engines. - */ dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[0].cap_mask); dma_cap_set(DMA_XOR, orion_xor0_channels_data[0].cap_mask); - dma_cap_set(DMA_MEMSET, orion_xor0_channels_data[1].cap_mask); dma_cap_set(DMA_MEMCPY, orion_xor0_channels_data[1].cap_mask); dma_cap_set(DMA_XOR, orion_xor0_channels_data[1].cap_mask); @@ -732,14 +727,9 @@ void __init orion_xor1_init(unsigned long mapbase_low, orion_xor1_shared_resources[3].start = irq_1; orion_xor1_shared_resources[3].end = irq_1; - /* - * two engines can't do memset simultaneously, this limitation - * satisfied by removing memset support from one of the engines. - */ dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[0].cap_mask); dma_cap_set(DMA_XOR, orion_xor1_channels_data[0].cap_mask); - dma_cap_set(DMA_MEMSET, orion_xor1_channels_data[1].cap_mask); dma_cap_set(DMA_MEMCPY, orion_xor1_channels_data[1].cap_mask); dma_cap_set(DMA_XOR, orion_xor1_channels_data[1].cap_mask); diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c index 5d5ac0f05422..9d2b2ac74938 100644 --- a/arch/arm/plat-orion/time.c +++ b/arch/arm/plat-orion/time.c @@ -16,7 +16,7 @@ #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/irq.h> -#include <asm/sched_clock.h> +#include <linux/sched_clock.h> /* * MBus bridge block registers. 
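The plat-iop, plat-omap and plat-orion hunks above replace <asm/sched_clock.h> with the generic <linux/sched_clock.h>, which now carries the sched_clock registration API. A minimal sketch of how a platform timer typically hooks a free-running counter into that framework follows; the MMIO base, counter width and clock rate are illustrative assumptions, not values taken from this patch.

/*
 * Minimal sketch, assuming a 32-bit free-running counter mapped at 'base'
 * and ticking at 'rate' Hz; all names and values here are illustrative only.
 */
#include <linux/io.h>
#include <linux/init.h>
#include <linux/sched_clock.h>

static void __iomem *example_counter;

static u32 notrace example_read_sched_clock(void)
{
	/* sched_clock() read hook: return the raw counter value */
	return readl_relaxed(example_counter);
}

static void __init example_sched_clock_init(void __iomem *base, unsigned long rate)
{
	example_counter = base;
	/* Register a 32-bit counter with the generic sched_clock framework */
	setup_sched_clock(example_read_sched_clock, 32, rate);
}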
diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h index d7e17150028a..7231c8e4975e 100644 --- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h +++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h @@ -285,7 +285,7 @@ static inline int s3c_cpufreq_addfreq(struct cpufreq_frequency_table *table, s3c_freq_dbg("%s: { %d = %u kHz }\n", __func__, index, freq); - table[index].index = index; + table[index].driver_data = index; table[index].frequency = freq; } diff --git a/arch/arm/plat-samsung/samsung-time.c b/arch/arm/plat-samsung/samsung-time.c index f899cbc9b288..2957075ca836 100644 --- a/arch/arm/plat-samsung/samsung-time.c +++ b/arch/arm/plat-samsung/samsung-time.c @@ -15,12 +15,12 @@ #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/platform_device.h> +#include <linux/sched_clock.h> #include <asm/smp_twd.h> #include <asm/mach/time.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> -#include <asm/sched_clock.h> #include <mach/map.h> #include <plat/devs.h> diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S index b178d44e9eaa..2677bc3762d7 100644 --- a/arch/arm/plat-versatile/headsmp.S +++ b/arch/arm/plat-versatile/headsmp.S @@ -11,8 +11,6 @@ #include <linux/linkage.h> #include <linux/init.h> - __INIT - /* * Realview/Versatile Express specific entry point for secondary CPUs. * This provides a "holding pen" into which all secondary cores are held diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c index b33b74c87232..51b109e3b6c3 100644 --- a/arch/arm/plat-versatile/sched-clock.c +++ b/arch/arm/plat-versatile/sched-clock.c @@ -20,8 +20,8 @@ */ #include <linux/kernel.h> #include <linux/io.h> +#include <linux/sched_clock.h> -#include <asm/sched_clock.h> #include <plat/sched_clock.h> static void __iomem *ctr; diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 13609e01f4b7..f71c37edca26 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -314,4 +314,5 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op); EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op); EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op); EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op); +EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op); EXPORT_SYMBOL_GPL(privcmd_call); diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index 199cb2da7663..d1cf7b7c2200 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -88,6 +88,7 @@ HYPERCALL2(hvm_op); HYPERCALL2(memory_op); HYPERCALL2(physdev_op); HYPERCALL3(vcpu_op); +HYPERCALL1(tmem_op); ENTRY(privcmd_call) stmdb sp!, {r4} diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 56b3f6d447ae..9737e97f9f38 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -7,6 +7,7 @@ config ARM64 select ARM_AMBA select ARM_ARCH_TIMER select ARM_GIC + select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK select GENERIC_CLOCKEVENTS @@ -111,6 +112,11 @@ config ARCH_VEXPRESS This enables support for the ARMv8 software model (Versatile Express). 
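The s3c_cpufreq_addfreq hunk above tracks the cpufreq core's rename of the frequency-table "index" field to "driver_data". As a rough illustration of the renamed field (the frequencies below are invented for the example, not taken from this patch):

/* Illustrative only: a frequency table populated via the renamed .driver_data field */
#include <linux/cpufreq.h>

static struct cpufreq_frequency_table example_freq_table[] = {
	{ .driver_data = 0, .frequency = 200000 },	/* frequencies are in kHz */
	{ .driver_data = 1, .frequency = 400000 },
	{ .driver_data = 2, .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};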
+config ARCH_XGENE + bool "AppliedMicro X-Gene SOC Family" + help + This enables support for AppliedMicro X-Gene SOC Family + endmenu menu "Bus support" @@ -148,6 +154,8 @@ config NR_CPUS int "Maximum number of CPUs (2-32)" range 2 32 depends on SMP + # These have to remain sorted largest to smallest + default "8" if ARCH_XGENE default "4" source kernel/Kconfig.preempt @@ -180,8 +188,35 @@ config HW_PERF_EVENTS Enable hardware performance counter support for perf events. If disabled, perf events will use software events only. +config SYS_SUPPORTS_HUGETLBFS + def_bool y + +config ARCH_WANT_GENERAL_HUGETLB + def_bool y + +config ARCH_WANT_HUGE_PMD_SHARE + def_bool y if !ARM64_64K_PAGES + +config HAVE_ARCH_TRANSPARENT_HUGEPAGE + def_bool y + source "mm/Kconfig" +config XEN_DOM0 + def_bool y + depends on XEN + +config XEN + bool "Xen guest support on ARM64 (EXPERIMENTAL)" + depends on ARM64 && OF + help + Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64. + +config FORCE_MAX_ZONEORDER + int + default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE) + default "11" + endmenu menu "Boot options" @@ -235,6 +270,8 @@ source "drivers/Kconfig" source "fs/Kconfig" +source "arch/arm64/kvm/Kconfig" + source "arch/arm64/Kconfig.debug" source "security/Kconfig" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index c95c5cb212fd..d90cf79f233a 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -37,6 +37,8 @@ TEXT_OFFSET := 0x00080000 export TEXT_OFFSET GZFLAGS core-y += arch/arm64/kernel/ arch/arm64/mm/ +core-$(CONFIG_KVM) += arch/arm64/kvm/ +core-$(CONFIG_XEN) += arch/arm64/xen/ libs-y := arch/arm64/lib/ $(libs-y) libs-y += $(LIBGCC) @@ -60,6 +62,10 @@ zinstall install: vmlinux dtbs: scripts $(Q)$(MAKE) $(build)=$(boot)/dts dtbs +PHONY += vdso_install +vdso_install: + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@ + # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index 68457e9e0975..c52bdb051f66 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -1,4 +1,5 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb +dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb targets += dtbs targets += $(dtb-y) diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts new file mode 100644 index 000000000000..1247ca1200b1 --- /dev/null +++ b/arch/arm64/boot/dts/apm-mustang.dts @@ -0,0 +1,26 @@ +/* + * dts file for AppliedMicro (APM) Mustang Board + * + * Copyright (C) 2013, Applied Micro Circuits Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ */ + +/dts-v1/; + +/include/ "apm-storm.dtsi" + +/ { + model = "APM X-Gene Mustang board"; + compatible = "apm,mustang", "apm,xgene-storm"; + + chosen { }; + + memory { + device_type = "memory"; + reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */ + }; +}; diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi new file mode 100644 index 000000000000..bfdc57834929 --- /dev/null +++ b/arch/arm64/boot/dts/apm-storm.dtsi @@ -0,0 +1,116 @@ +/* + * dts file for AppliedMicro (APM) X-Gene Storm SOC + * + * Copyright (C) 2013, Applied Micro Circuits Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +/ { + compatible = "apm,xgene-storm"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu@000 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x000>; + enable-method = "spin-table"; + cpu-release-addr = <0x1 0x0000fff8>; + }; + cpu@001 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x001>; + enable-method = "spin-table"; + cpu-release-addr = <0x1 0x0000fff8>; + }; + cpu@100 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "spin-table"; + cpu-release-addr = <0x1 0x0000fff8>; + }; + cpu@101 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x101>; + enable-method = "spin-table"; + cpu-release-addr = <0x1 0x0000fff8>; + }; + cpu@200 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "spin-table"; + cpu-release-addr = <0x1 0x0000fff8>; + }; + cpu@201 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x201>; + enable-method = "spin-table"; + cpu-release-addr = <0x1 0x0000fff8>; + }; + cpu@300 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "spin-table"; + cpu-release-addr = <0x1 0x0000fff8>; + }; + cpu@301 { + device_type = "cpu"; + compatible = "apm,potenza", "arm,armv8"; + reg = <0x0 0x301>; + enable-method = "spin-table"; + cpu-release-addr = <0x1 0x0000fff8>; + }; + }; + + gic: interrupt-controller@78010000 { + compatible = "arm,cortex-a15-gic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x0 0x78010000 0x0 0x1000>, /* GIC Dist */ + <0x0 0x78020000 0x0 0x1000>, /* GIC CPU */ + <0x0 0x78040000 0x0 0x2000>, /* GIC VCPU Control */ + <0x0 0x78060000 0x0 0x2000>; /* GIC VCPU */ + interrupts = <1 9 0xf04>; /* GIC Maintenence IRQ */ + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = <1 0 0xff01>, /* Secure Phys IRQ */ + <1 13 0xff01>, /* Non-secure Phys IRQ */ + <1 14 0xff01>, /* Virt IRQ */ + <1 15 0xff01>; /* Hyp IRQ */ + clock-frequency = <50000000>; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + serial0: serial@1c020000 { + device_type = "serial"; + compatible = "ns16550"; + reg = <0 0x1c020000 0x0 0x1000>; + reg-shift = <2>; + clock-frequency = <10000000>; /* Updated by bootloader */ + interrupt-parent = <&gic>; + interrupts = <0x0 0x4c 0x4>; + }; + }; +}; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 8d9696adb440..5b3e83217b03 100644 
--- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -24,6 +24,7 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_XGENE=y CONFIG_SMP=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_CMDLINE="console=ttyAMA0" @@ -54,6 +55,9 @@ CONFIG_INPUT_EVDEV=y # CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y # CONFIG_HW_RANDOM is not set diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index bf6ab242f047..d56ed11ba9a3 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -110,16 +110,6 @@ static inline void __cpuinit arch_counter_set_user_access(void) asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); } -static inline u64 arch_counter_get_cntpct(void) -{ - u64 cval; - - isb(); - asm volatile("mrs %0, cntpct_el0" : "=r" (cval)); - - return cval; -} - static inline u64 arch_counter_get_cntvct(void) { u64 cval; diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 3300cbd18a89..fea9ee327206 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -123,9 +123,6 @@ static inline void __flush_icache_all(void) #define flush_dcache_mmap_unlock(mapping) \ spin_unlock_irq(&(mapping)->tree_lock) -#define flush_icache_user_range(vma,page,addr,len) \ - flush_dcache_page(page) - /* * We don't appear to need to do anything here. In fact, if we did, we'd * duplicate cache flushing elsewhere performed by flush_dcache_page(). diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index cf2749488cd4..5fe138e0b828 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -37,11 +37,14 @@ }) #define ARM_CPU_IMP_ARM 0x41 +#define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_PART_AEM_V8 0xD0F0 #define ARM_CPU_PART_FOUNDATION 0xD000 #define ARM_CPU_PART_CORTEX_A57 0xD070 +#define APM_CPU_PART_POTENZA 0x0000 + #ifndef __ASSEMBLY__ /* diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 7eaa0b302493..ef8235c68c09 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -83,6 +83,15 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs) } #endif +#ifdef CONFIG_COMPAT +int aarch32_break_handler(struct pt_regs *regs); +#else +static int aarch32_break_handler(struct pt_regs *regs) +{ + return -EFAULT; +} +#endif + #endif /* __ASSEMBLY */ #endif /* __KERNEL__ */ #endif /* __ASM_DEBUG_MONITORS_H */ diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index 0d8453c755a8..cf98b362094b 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h @@ -18,6 +18,9 @@ struct dev_archdata { struct dma_map_ops *dma_ops; +#ifdef CONFIG_IOMMU_API + void *iommu; /* private IOMMU data */ +#endif }; struct pdev_archdata { diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 994776894198..8d1810001aef 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -81,8 +81,12 @@ static inline void dma_mark_clean(void *addr, size_t size) { } -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) 
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) +#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL) + +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); void *vaddr; @@ -90,13 +94,14 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size, if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr)) return vaddr; - vaddr = ops->alloc(dev, size, dma_handle, flags, NULL); + vaddr = ops->alloc(dev, size, dma_handle, flags, attrs); debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); return vaddr; } -static inline void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dev_addr) +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t dev_addr, + struct dma_attrs *attrs) { struct dma_map_ops *ops = get_dma_ops(dev); @@ -104,7 +109,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size, return; debug_dma_free_coherent(dev, size, vaddr, dev_addr); - ops->free(dev, size, vaddr, dev_addr, NULL); + ops->free(dev, size, vaddr, dev_addr, attrs); } /* diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h new file mode 100644 index 000000000000..5b7ca8ace95f --- /dev/null +++ b/arch/arm64/include/asm/hugetlb.h @@ -0,0 +1,117 @@ +/* + * arch/arm64/include/asm/hugetlb.h + * + * Copyright (C) 2013 Linaro Ltd. + * + * Based on arch/x86/include/asm/hugetlb.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_HUGETLB_H +#define __ASM_HUGETLB_H + +#include <asm-generic/hugetlb.h> +#include <asm/page.h> + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + ptep_clear_flush(vma, addr, ptep); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, unsigned long end, + unsigned long floor, + unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, unsigned long len) +{ + return 0; +} + +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, unsigned long len) +{ + struct hstate *h = hstate_file(file); + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} + +#endif /* __ASM_HUGETLB_H */ diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h new file mode 100644 index 000000000000..d2c79049ff11 --- /dev/null +++ b/arch/arm64/include/asm/hypervisor.h @@ -0,0 +1,6 @@ +#ifndef _ASM_ARM64_HYPERVISOR_H +#define _ASM_ARM64_HYPERVISOR_H + +#include <asm/xen/hypervisor.h> + +#endif diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 2e12258aa7e4..1d12f89140ba 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -228,10 +228,12 @@ extern void __iounmap(volatile void __iomem *addr); #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) +#define PROT_NORMAL (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) +#define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL)) #define 
iounmap __iounmap #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h new file mode 100644 index 000000000000..a5f28e2720c7 --- /dev/null +++ b/arch/arm64/include/asm/kvm_arm.h @@ -0,0 +1,245 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM64_KVM_ARM_H__ +#define __ARM64_KVM_ARM_H__ + +#include <asm/types.h> + +/* Hyp Configuration Register (HCR) bits */ +#define HCR_ID (UL(1) << 33) +#define HCR_CD (UL(1) << 32) +#define HCR_RW_SHIFT 31 +#define HCR_RW (UL(1) << HCR_RW_SHIFT) +#define HCR_TRVM (UL(1) << 30) +#define HCR_HCD (UL(1) << 29) +#define HCR_TDZ (UL(1) << 28) +#define HCR_TGE (UL(1) << 27) +#define HCR_TVM (UL(1) << 26) +#define HCR_TTLB (UL(1) << 25) +#define HCR_TPU (UL(1) << 24) +#define HCR_TPC (UL(1) << 23) +#define HCR_TSW (UL(1) << 22) +#define HCR_TAC (UL(1) << 21) +#define HCR_TIDCP (UL(1) << 20) +#define HCR_TSC (UL(1) << 19) +#define HCR_TID3 (UL(1) << 18) +#define HCR_TID2 (UL(1) << 17) +#define HCR_TID1 (UL(1) << 16) +#define HCR_TID0 (UL(1) << 15) +#define HCR_TWE (UL(1) << 14) +#define HCR_TWI (UL(1) << 13) +#define HCR_DC (UL(1) << 12) +#define HCR_BSU (3 << 10) +#define HCR_BSU_IS (UL(1) << 10) +#define HCR_FB (UL(1) << 9) +#define HCR_VA (UL(1) << 8) +#define HCR_VI (UL(1) << 7) +#define HCR_VF (UL(1) << 6) +#define HCR_AMO (UL(1) << 5) +#define HCR_IMO (UL(1) << 4) +#define HCR_FMO (UL(1) << 3) +#define HCR_PTW (UL(1) << 2) +#define HCR_SWIO (UL(1) << 1) +#define HCR_VM (UL(1) << 0) + +/* + * The bits we set in HCR: + * RW: 64bit by default, can be overriden for 32bit VMs + * TAC: Trap ACTLR + * TSC: Trap SMC + * TSW: Trap cache operations by set/way + * TWI: Trap WFI + * TIDCP: Trap L2CTLR/L2ECTLR + * BSU_IS: Upgrade barriers to the inner shareable domain + * FB: Force broadcast of all maintainance operations + * AMO: Override CPSR.A and enable signaling with VA + * IMO: Override CPSR.I and enable signaling with VI + * FMO: Override CPSR.F and enable signaling with VF + * SWIO: Turn set/way invalidates into set/way clean+invalidate + */ +#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ + HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ + HCR_SWIO | HCR_TIDCP | HCR_RW) +#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) + +/* Hyp System Control Register (SCTLR_EL2) bits */ +#define SCTLR_EL2_EE (1 << 25) +#define SCTLR_EL2_WXN (1 << 19) +#define SCTLR_EL2_I (1 << 12) +#define SCTLR_EL2_SA (1 << 3) +#define SCTLR_EL2_C (1 << 2) +#define SCTLR_EL2_A (1 << 1) +#define SCTLR_EL2_M 1 +#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \ + SCTLR_EL2_SA | SCTLR_EL2_I) + +/* TCR_EL2 Registers bits */ +#define TCR_EL2_TBI (1 << 20) +#define TCR_EL2_PS (7 << 16) +#define TCR_EL2_PS_40B (2 << 16) +#define TCR_EL2_TG0 (1 << 14) +#define TCR_EL2_SH0 (3 << 12) +#define TCR_EL2_ORGN0 (3 << 10) 
+#define TCR_EL2_IRGN0 (3 << 8) +#define TCR_EL2_T0SZ 0x3f +#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ + TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) + +#define TCR_EL2_FLAGS (TCR_EL2_PS_40B) + +/* VTCR_EL2 Registers bits */ +#define VTCR_EL2_PS_MASK (7 << 16) +#define VTCR_EL2_PS_40B (2 << 16) +#define VTCR_EL2_TG0_MASK (1 << 14) +#define VTCR_EL2_TG0_4K (0 << 14) +#define VTCR_EL2_TG0_64K (1 << 14) +#define VTCR_EL2_SH0_MASK (3 << 12) +#define VTCR_EL2_SH0_INNER (3 << 12) +#define VTCR_EL2_ORGN0_MASK (3 << 10) +#define VTCR_EL2_ORGN0_WBWA (1 << 10) +#define VTCR_EL2_IRGN0_MASK (3 << 8) +#define VTCR_EL2_IRGN0_WBWA (1 << 8) +#define VTCR_EL2_SL0_MASK (3 << 6) +#define VTCR_EL2_SL0_LVL1 (1 << 6) +#define VTCR_EL2_T0SZ_MASK 0x3f +#define VTCR_EL2_T0SZ_40B 24 + +#ifdef CONFIG_ARM64_64K_PAGES +/* + * Stage2 translation configuration: + * 40bits output (PS = 2) + * 40bits input (T0SZ = 24) + * 64kB pages (TG0 = 1) + * 2 level page tables (SL = 1) + */ +#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \ + VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ + VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \ + VTCR_EL2_T0SZ_40B) +#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B) +#else +/* + * Stage2 translation configuration: + * 40bits output (PS = 2) + * 40bits input (T0SZ = 24) + * 4kB pages (TG0 = 0) + * 3 level page tables (SL = 1) + */ +#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \ + VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ + VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \ + VTCR_EL2_T0SZ_40B) +#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B) +#endif + +#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) +#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_VMID_SHIFT (48LLU) +#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) + +/* Hyp System Trap Register */ +#define HSTR_EL2_TTEE (1 << 16) +#define HSTR_EL2_T(x) (1 << x) + +/* Hyp Coprocessor Trap Register */ +#define CPTR_EL2_TCPAC (1 << 31) +#define CPTR_EL2_TTA (1 << 20) +#define CPTR_EL2_TFP (1 << 10) + +/* Hyp Debug Configuration Register bits */ +#define MDCR_EL2_TDRA (1 << 11) +#define MDCR_EL2_TDOSA (1 << 10) +#define MDCR_EL2_TDA (1 << 9) +#define MDCR_EL2_TDE (1 << 8) +#define MDCR_EL2_HPME (1 << 7) +#define MDCR_EL2_TPM (1 << 6) +#define MDCR_EL2_TPMCR (1 << 5) +#define MDCR_EL2_HPMN_MASK (0x1F) + +/* Exception Syndrome Register (ESR) bits */ +#define ESR_EL2_EC_SHIFT (26) +#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT) +#define ESR_EL2_IL (1U << 25) +#define ESR_EL2_ISS (ESR_EL2_IL - 1) +#define ESR_EL2_ISV_SHIFT (24) +#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT) +#define ESR_EL2_SAS_SHIFT (22) +#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT) +#define ESR_EL2_SSE (1 << 21) +#define ESR_EL2_SRT_SHIFT (16) +#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT) +#define ESR_EL2_SF (1 << 15) +#define ESR_EL2_AR (1 << 14) +#define ESR_EL2_EA (1 << 9) +#define ESR_EL2_CM (1 << 8) +#define ESR_EL2_S1PTW (1 << 7) +#define ESR_EL2_WNR (1 << 6) +#define ESR_EL2_FSC (0x3f) +#define ESR_EL2_FSC_TYPE (0x3c) + +#define ESR_EL2_CV_SHIFT (24) +#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT) +#define ESR_EL2_COND_SHIFT (20) +#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT) + + +#define FSC_FAULT (0x04) +#define FSC_PERM (0x0c) + +/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ +#define HPFAR_MASK (~0xFUL) + +#define ESR_EL2_EC_UNKNOWN (0x00) +#define ESR_EL2_EC_WFI (0x01) +#define ESR_EL2_EC_CP15_32 (0x03) +#define ESR_EL2_EC_CP15_64 (0x04) +#define ESR_EL2_EC_CP14_MR (0x05) +#define 
ESR_EL2_EC_CP14_LS (0x06) +#define ESR_EL2_EC_FP_ASIMD (0x07) +#define ESR_EL2_EC_CP10_ID (0x08) +#define ESR_EL2_EC_CP14_64 (0x0C) +#define ESR_EL2_EC_ILL_ISS (0x0E) +#define ESR_EL2_EC_SVC32 (0x11) +#define ESR_EL2_EC_HVC32 (0x12) +#define ESR_EL2_EC_SMC32 (0x13) +#define ESR_EL2_EC_SVC64 (0x15) +#define ESR_EL2_EC_HVC64 (0x16) +#define ESR_EL2_EC_SMC64 (0x17) +#define ESR_EL2_EC_SYS64 (0x18) +#define ESR_EL2_EC_IABT (0x20) +#define ESR_EL2_EC_IABT_HYP (0x21) +#define ESR_EL2_EC_PC_ALIGN (0x22) +#define ESR_EL2_EC_DABT (0x24) +#define ESR_EL2_EC_DABT_HYP (0x25) +#define ESR_EL2_EC_SP_ALIGN (0x26) +#define ESR_EL2_EC_FP_EXC32 (0x28) +#define ESR_EL2_EC_FP_EXC64 (0x2C) +#define ESR_EL2_EC_SERRROR (0x2F) +#define ESR_EL2_EC_BREAKPT (0x30) +#define ESR_EL2_EC_BREAKPT_HYP (0x31) +#define ESR_EL2_EC_SOFTSTP (0x32) +#define ESR_EL2_EC_SOFTSTP_HYP (0x33) +#define ESR_EL2_EC_WATCHPT (0x34) +#define ESR_EL2_EC_WATCHPT_HYP (0x35) +#define ESR_EL2_EC_BKPT32 (0x38) +#define ESR_EL2_EC_VECTOR32 (0x3A) +#define ESR_EL2_EC_BRK64 (0x3C) + +#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10 + +#endif /* __ARM64_KVM_ARM_H__ */ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h new file mode 100644 index 000000000000..c92de4163eba --- /dev/null +++ b/arch/arm64/include/asm/kvm_asm.h @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM_KVM_ASM_H__ +#define __ARM_KVM_ASM_H__ + +/* + * 0 is reserved as an invalid value. + * Order *must* be kept in sync with the hyp switch code. + */ +#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */ +#define CSSELR_EL1 2 /* Cache Size Selection Register */ +#define SCTLR_EL1 3 /* System Control Register */ +#define ACTLR_EL1 4 /* Auxilliary Control Register */ +#define CPACR_EL1 5 /* Coprocessor Access Control */ +#define TTBR0_EL1 6 /* Translation Table Base Register 0 */ +#define TTBR1_EL1 7 /* Translation Table Base Register 1 */ +#define TCR_EL1 8 /* Translation Control Register */ +#define ESR_EL1 9 /* Exception Syndrome Register */ +#define AFSR0_EL1 10 /* Auxilary Fault Status Register 0 */ +#define AFSR1_EL1 11 /* Auxilary Fault Status Register 1 */ +#define FAR_EL1 12 /* Fault Address Register */ +#define MAIR_EL1 13 /* Memory Attribute Indirection Register */ +#define VBAR_EL1 14 /* Vector Base Address Register */ +#define CONTEXTIDR_EL1 15 /* Context ID Register */ +#define TPIDR_EL0 16 /* Thread ID, User R/W */ +#define TPIDRRO_EL0 17 /* Thread ID, User R/O */ +#define TPIDR_EL1 18 /* Thread ID, Privileged */ +#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ +#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ +/* 32bit specific registers. 
Keep them at the end of the range */ +#define DACR32_EL2 21 /* Domain Access Control Register */ +#define IFSR32_EL2 22 /* Instruction Fault Status Register */ +#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ +#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ +#define TEECR32_EL1 25 /* ThumbEE Configuration Register */ +#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ +#define NR_SYS_REGS 27 + +/* 32bit mapping */ +#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ +#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */ +#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */ +#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */ +#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */ +#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */ +#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */ +#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */ +#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */ +#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */ +#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */ +#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */ +#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */ +#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */ +#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ +#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ +#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ +#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ +#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ +#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ +#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */ +#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ +#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ +#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ +#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ +#define NR_CP15_REGS (NR_SYS_REGS * 2) + +#define ARM_EXCEPTION_IRQ 0 +#define ARM_EXCEPTION_TRAP 1 + +#ifndef __ASSEMBLY__ +struct kvm; +struct kvm_vcpu; + +extern char __kvm_hyp_init[]; +extern char __kvm_hyp_init_end[]; + +extern char __kvm_hyp_vector[]; + +extern char __kvm_hyp_code_start[]; +extern char __kvm_hyp_code_end[]; + +extern void __kvm_flush_vm_context(void); +extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); + +extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); +#endif + +#endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h new file mode 100644 index 000000000000..9a59301cd014 --- /dev/null +++ b/arch/arm64/include/asm/kvm_coproc.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/include/asm/kvm_coproc.h + * Copyright (C) 2012 Rusty Russell IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM64_KVM_COPROC_H__ +#define __ARM64_KVM_COPROC_H__ + +#include <linux/kvm_host.h> + +void kvm_reset_sys_regs(struct kvm_vcpu *vcpu); + +struct kvm_sys_reg_table { + const struct sys_reg_desc *table; + size_t num; +}; + +struct kvm_sys_reg_target_table { + struct kvm_sys_reg_table table64; + struct kvm_sys_reg_table table32; +}; + +void kvm_register_target_sys_reg_table(unsigned int target, + struct kvm_sys_reg_target_table *table); + +int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run); + +#define kvm_coproc_table_init kvm_sys_reg_table_init +void kvm_sys_reg_table_init(void); + +struct kvm_one_reg; +int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); +int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu); + +#endif /* __ARM64_KVM_COPROC_H__ */ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h new file mode 100644 index 000000000000..eec073875218 --- /dev/null +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -0,0 +1,180 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/include/kvm_emulate.h + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __ARM64_KVM_EMULATE_H__ +#define __ARM64_KVM_EMULATE_H__ + +#include <linux/kvm_host.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmio.h> +#include <asm/ptrace.h> + +unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num); +unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu); + +bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); +void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); + +void kvm_inject_undefined(struct kvm_vcpu *vcpu); +void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); +void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); + +static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) +{ + return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; +} + +static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu) +{ + return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1; +} + +static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) +{ + return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate; +} + +static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) +{ + return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT); +} + +static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) +{ + if (vcpu_mode_is_32bit(vcpu)) + return kvm_condition_valid32(vcpu); + + return true; +} + +static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) +{ + if (vcpu_mode_is_32bit(vcpu)) + kvm_skip_instr32(vcpu, is_wide_instr); + else + *vcpu_pc(vcpu) += 4; +} + +static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) +{ + *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; +} + +static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) +{ + if (vcpu_mode_is_32bit(vcpu)) + return vcpu_reg32(vcpu, reg_num); + + return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; +} + +/* Get vcpu SPSR for current mode */ +static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu) +{ + if (vcpu_mode_is_32bit(vcpu)) + return vcpu_spsr32(vcpu); + + return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1]; +} + +static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) +{ + u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; + + if (vcpu_mode_is_32bit(vcpu)) + return mode > COMPAT_PSR_MODE_USR; + + return mode != PSR_MODE_EL0t; +} + +static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault.esr_el2; +} + +static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault.far_el2; +} + +static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) +{ + return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8; +} + +static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV); +} + +static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR); +} + +static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE); +} + +static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) +{ + return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT; +} + +static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA); +} + +static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW); +} + +static inline int 
kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) +{ + return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT); +} + +/* This one is not specific to Data Abort */ +static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL); +} + +static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT; +} + +static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT; +} + +static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; +} + +#endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h new file mode 100644 index 000000000000..644d73956864 --- /dev/null +++ b/arch/arm64/include/asm/kvm_host.h @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/include/asm/kvm_host.h: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM64_KVM_HOST_H__ +#define __ARM64_KVM_HOST_H__ + +#include <asm/kvm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_mmio.h> + +#define KVM_MAX_VCPUS 4 +#define KVM_USER_MEM_SLOTS 32 +#define KVM_PRIVATE_MEM_SLOTS 4 +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +#include <kvm/arm_vgic.h> +#include <kvm/arm_arch_timer.h> + +#define KVM_VCPU_MAX_FEATURES 2 + +/* We don't currently support large pages. */ +#define KVM_HPAGE_GFN_SHIFT(x) 0 +#define KVM_NR_PAGE_SIZES 1 +#define KVM_PAGES_PER_HPAGE(x) (1UL<<31) + +struct kvm_vcpu; +int kvm_target_cpu(void); +int kvm_reset_vcpu(struct kvm_vcpu *vcpu); +int kvm_arch_dev_ioctl_check_extension(long ext); + +struct kvm_arch { + /* The VMID generation used for the virt. memory system */ + u64 vmid_gen; + u32 vmid; + + /* 1-level 2nd stage table and lock */ + spinlock_t pgd_lock; + pgd_t *pgd; + + /* VTTBR value associated with above pgd and vmid */ + u64 vttbr; + + /* Interrupt controller */ + struct vgic_dist vgic; + + /* Timer */ + struct arch_timer_kvm timer; +}; + +#define KVM_NR_MEM_OBJS 40 + +/* + * We don't want allocation failures within the mmu code, so we preallocate + * enough memory for a single page fault in a cache. 
+ */ +struct kvm_mmu_memory_cache { + int nobjs; + void *objects[KVM_NR_MEM_OBJS]; +}; + +struct kvm_vcpu_fault_info { + u32 esr_el2; /* Hyp Syndrom Register */ + u64 far_el2; /* Hyp Fault Address Register */ + u64 hpfar_el2; /* Hyp IPA Fault Address Register */ +}; + +struct kvm_cpu_context { + struct kvm_regs gp_regs; + union { + u64 sys_regs[NR_SYS_REGS]; + u32 cp15[NR_CP15_REGS]; + }; +}; + +typedef struct kvm_cpu_context kvm_cpu_context_t; + +struct kvm_vcpu_arch { + struct kvm_cpu_context ctxt; + + /* HYP configuration */ + u64 hcr_el2; + + /* Exception Information */ + struct kvm_vcpu_fault_info fault; + + /* Pointer to host CPU context */ + kvm_cpu_context_t *host_cpu_context; + + /* VGIC state */ + struct vgic_cpu vgic_cpu; + struct arch_timer_cpu timer_cpu; + + /* + * Anything that is not used directly from assembly code goes + * here. + */ + /* dcache set/way operation pending */ + int last_pcpu; + cpumask_t require_dcache_flush; + + /* Don't run the guest */ + bool pause; + + /* IO related fields */ + struct kvm_decode mmio_decode; + + /* Interrupt related fields */ + u64 irq_lines; /* IRQ and FIQ levels */ + + /* Cache some mmu pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* Target CPU and feature flags */ + u32 target; + DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); + + /* Detect first run of a vcpu */ + bool has_run_once; +}; + +#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) +#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) +#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)]) + +struct kvm_vm_stat { + u32 remote_tlb_flush; +}; + +struct kvm_vcpu_stat { + u32 halt_wakeup; +}; + +struct kvm_vcpu_init; +int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, + const struct kvm_vcpu_init *init); +unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); +struct kvm_one_reg; +int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); + +#define KVM_ARCH_WANT_MMU_NOTIFIER +struct kvm; +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); +int kvm_unmap_hva_range(struct kvm *kvm, + unsigned long start, unsigned long end); +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); + +/* We do not have shadow page tables, hence the empty hooks */ +static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ + return 0; +} + +static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + return 0; +} + +struct kvm_vcpu *kvm_arm_get_running_vcpu(void); +struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); + +u64 kvm_call_hyp(void *hypfn, ...); + +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index); + +int kvm_perf_init(void); +int kvm_perf_teardown(void); + +static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, + phys_addr_t pgd_ptr, + unsigned long hyp_stack_ptr, + unsigned long vector_ptr) +{ + /* + * Call initialization code, and switch to the full blown + * HYP code. 
+ */ + kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, + hyp_stack_ptr, vector_ptr); +} + +#endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h new file mode 100644 index 000000000000..fc2f689c0694 --- /dev/null +++ b/arch/arm64/include/asm/kvm_mmio.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM64_KVM_MMIO_H__ +#define __ARM64_KVM_MMIO_H__ + +#include <linux/kvm_host.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_arm.h> + +/* + * This is annoying. The mmio code requires this, even if we don't + * need any decoding. To be fixed. + */ +struct kvm_decode { + unsigned long rt; + bool sign_extend; +}; + +/* + * The in-kernel MMIO emulation code wants to use a copy of run->mmio, + * which is an anonymous type. Use our own type instead. + */ +struct kvm_exit_mmio { + phys_addr_t phys_addr; + u8 data[8]; + u32 len; + bool is_write; +}; + +static inline void kvm_prepare_mmio(struct kvm_run *run, + struct kvm_exit_mmio *mmio) +{ + run->mmio.phys_addr = mmio->phys_addr; + run->mmio.len = mmio->len; + run->mmio.is_write = mmio->is_write; + memcpy(run->mmio.data, mmio->data, mmio->len); + run->exit_reason = KVM_EXIT_MMIO; +} + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + phys_addr_t fault_ipa); + +#endif /* __ARM64_KVM_MMIO_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..efe609c6a3c9 --- /dev/null +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM64_KVM_MMU_H__ +#define __ARM64_KVM_MMU_H__ + +#include <asm/page.h> +#include <asm/memory.h> + +/* + * As we only have the TTBR0_EL2 register, we cannot express + * "negative" addresses. This makes it impossible to directly share + * mappings with the kernel. + * + * Instead, give the HYP mode its own VA region at a fixed offset from + * the kernel by just masking the top bits (which are all ones for a + * kernel address). 
+ */ +#define HYP_PAGE_OFFSET_SHIFT VA_BITS +#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) +#define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK) + +/* + * Our virtual mapping for the idmap-ed MMU-enable code. Must be + * shared across all the page-tables. Conveniently, we use the last + * possible page, where no kernel mapping will ever exist. + */ +#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK) + +#ifdef __ASSEMBLY__ + +/* + * Convert a kernel VA into a HYP VA. + * reg: VA to be converted. + */ +.macro kern_hyp_va reg + and \reg, \reg, #HYP_PAGE_OFFSET_MASK +.endm + +#else + +#include <asm/cachetype.h> +#include <asm/cacheflush.h> + +#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) + +/* + * Align KVM with the kernel's view of physical memory. Should be + * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration. + */ +#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT +#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) + +/* Make sure we get the right size, and thus the right alignment */ +#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT)) +#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) + +int create_hyp_mappings(void *from, void *to); +int create_hyp_io_mappings(void *from, void *to, phys_addr_t); +void free_boot_hyp_pgd(void); +void free_hyp_pgds(void); + +int kvm_alloc_stage2_pgd(struct kvm *kvm); +void kvm_free_stage2_pgd(struct kvm *kvm); +int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, + phys_addr_t pa, unsigned long size); + +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); + +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); + +phys_addr_t kvm_mmu_get_httbr(void); +phys_addr_t kvm_mmu_get_boot_httbr(void); +phys_addr_t kvm_get_idmap_vector(void); +int kvm_mmu_init(void); +void kvm_clear_hyp_idmap(void); + +#define kvm_set_pte(ptep, pte) set_pte(ptep, pte) + +static inline bool kvm_is_write_fault(unsigned long esr) +{ + unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT; + + if (esr_ec == ESR_EL2_EC_IABT) + return false; + + if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR)) + return false; + + return true; +} + +static inline void kvm_clean_dcache_area(void *addr, size_t size) {} +static inline void kvm_clean_pgd(pgd_t *pgd) {} +static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} +static inline void kvm_clean_pte(pte_t *pte) {} +static inline void kvm_clean_pte_entry(pte_t *pte) {} + +static inline void kvm_set_s2pte_writable(pte_t *pte) +{ + pte_val(*pte) |= PTE_S2_RDWR; +} + +struct kvm; + +static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) +{ + if (!icache_is_aliasing()) { /* PIPT */ + unsigned long hva = gfn_to_hva(kvm, gfn); + flush_icache_range(hva, hva + PAGE_SIZE); + } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ + /* any kind of VIPT cache */ + __flush_icache_all(); + } +} + +#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) + +#endif /* __ASSEMBLY__ */ +#endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h new file mode 100644 index 000000000000..e301a4816355 --- /dev/null +++ b/arch/arm64/include/asm/kvm_psci.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by 
the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM64_KVM_PSCI_H__ +#define __ARM64_KVM_PSCI_H__ + +bool kvm_psci_call(struct kvm_vcpu *vcpu); + +#endif /* __ARM64_KVM_PSCI_H__ */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 381f556b664e..20925bcf4e2a 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -90,6 +90,12 @@ #define MT_NORMAL_NC 3 #define MT_NORMAL 4 +/* + * Memory types for Stage-2 translation + */ +#define MT_S2_NORMAL 0xf +#define MT_S2_DEVICE_nGnRE 0x1 + #ifndef __ASSEMBLY__ extern phys_addr_t memstart_addr; diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index e2bc385adb6b..a9eee33dfa62 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -151,12 +151,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, { unsigned int cpu = smp_processor_id(); -#ifdef CONFIG_SMP - /* check for possible thread migration */ - if (!cpumask_empty(mm_cpumask(next)) && - !cpumask_test_cpu(cpu, mm_cpumask(next))) - __flush_icache_all(); -#endif if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) check_and_switch_context(next, tsk); } diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 75fd13d289b9..e182a356c979 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -25,16 +25,27 @@ /* * Hardware page table definitions. * + * Level 1 descriptor (PUD). + */ + +#define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1) + +/* * Level 2 descriptor (PMD). 
*/ #define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) #define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) +#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1) /* * Section */ +#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) +#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 2) +#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ +#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) @@ -53,6 +64,7 @@ #define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) #define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) #define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) +#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1) #define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ #define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ @@ -68,6 +80,24 @@ #define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2) /* + * 2nd stage PTE definitions + */ +#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */ +#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ + +/* + * Memory Attribute override for Stage-2 (MemAttr[3:0]) + */ +#define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2) +#define PTE_S2_MEMATTR_MASK (_AT(pteval_t, 0xf) << 2) + +/* + * EL2/HYP PTE/PMD definitions + */ +#define PMD_HYP PMD_SECT_USER +#define PTE_HYP PTE_USER + +/* * 40-bit physical address supported. */ #define PHYS_MASK_SHIFT (40) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 3a768e96cf0e..f0bebc5e22cd 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -25,8 +25,8 @@ * Software defined PTE bits definition. 
*/ #define PTE_VALID (_AT(pteval_t, 1) << 0) -#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */ -#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ +#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */ +#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */ #define PTE_DIRTY (_AT(pteval_t, 1) << 55) #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) @@ -66,7 +66,7 @@ extern pgprot_t pgprot_default; #define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) -#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE) +#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) @@ -76,7 +76,13 @@ extern pgprot_t pgprot_default; #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) -#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE) +#define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP) +#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) + +#define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) +#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN) + +#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) @@ -119,7 +125,7 @@ extern struct page *empty_zero_page; #define pte_none(pte) (!pte_val(pte)) #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) -#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) +#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr)) #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) @@ -173,12 +179,76 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, /* * Huge pte definitions. */ -#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE) -#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE)) +#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT)) +#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) + +/* + * Hugetlb definitions. + */ +#define HUGE_MAX_HSTATE 2 +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define __HAVE_ARCH_PTE_SPECIAL /* + * Software PMD bits for THP + */ + +#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) +#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 57) + +/* + * THP definitions. 
+ */ +#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) +#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) +#endif + +#define PMD_BIT_FUNC(fn,op) \ +static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } + +PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); +PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); +PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); +PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); +PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); +PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); +PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK); + +#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) + +#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT) +#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) +#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) + +#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN | + PMD_SECT_RDONLY | PMD_SECT_PROT_NONE | + PMD_SECT_VALID; + pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); + return pmd; +} + +#define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd) + +static inline int has_transparent_hugepage(void) +{ + return 1; +} + +/* * Mark the prot value as uncacheable and unbufferable. */ #define pgprot_noncached(prot) \ @@ -197,6 +267,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, #define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) +#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ + PMD_TYPE_TABLE) +#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ + PMD_TYPE_SECT) + + static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { *pmdp = pmd; @@ -263,7 +339,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #endif /* Find an entry in the third-level page table.. 
*/ -#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { @@ -281,12 +357,12 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a swap entry: - * bits 0-1: present (must be zero) - * bit 2: PTE_FILE - * bits 3-8: swap type + * bits 0, 2: present (must both be zero) + * bit 3: PTE_FILE + * bits 4-8: swap type * bits 9-63: swap offset */ -#define __SWP_TYPE_SHIFT 3 +#define __SWP_TYPE_SHIFT 4 #define __SWP_TYPE_BITS 6 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) @@ -306,15 +382,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; /* * Encode and decode a file entry: - * bits 0-1: present (must be zero) - * bit 2: PTE_FILE - * bits 3-63: file offset / PAGE_SIZE + * bits 0, 2: present (must both be zero) + * bit 3: PTE_FILE + * bits 4-63: file offset / PAGE_SIZE */ #define pte_file(pte) (pte_val(pte) & PTE_FILE) -#define pte_to_pgoff(x) (pte_val(x) >> 3) -#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE) +#define pte_to_pgoff(x) (pte_val(x) >> 4) +#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE) -#define PTE_FILE_MAX_BITS 61 +#define PTE_FILE_MAX_BITS 60 extern int kern_addr_valid(unsigned long addr); diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 41a71ee4c3df..0dacbbf9458b 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -171,7 +171,5 @@ extern unsigned long profile_pc(struct pt_regs *regs); #define profile_pc(regs) instruction_pointer(regs) #endif -extern int aarch32_break_trap(struct pt_regs *regs); - #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 7065e920149d..0defa0728a9b 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -59,9 +59,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) unsigned int tmp; asm volatile( - " ldaxr %w0, %1\n" + "2: ldaxr %w0, %1\n" " cbnz %w0, 1f\n" " stxr %w0, %w2, %1\n" + " cbnz %w0, 2b\n" "1:\n" : "=&r" (tmp), "+Q" (lock->lock) : "r" (1) diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h new file mode 100644 index 000000000000..8da0bf4f7659 --- /dev/null +++ b/arch/arm64/include/asm/sync_bitops.h @@ -0,0 +1,26 @@ +#ifndef __ASM_SYNC_BITOPS_H__ +#define __ASM_SYNC_BITOPS_H__ + +#include <asm/bitops.h> +#include <asm/cmpxchg.h> + +/* sync_bitops functions are equivalent to the SMP implementation of the + * original functions, independently from CONFIG_SMP being defined. + * + * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But + * under Xen you might be communicating with a completely external entity + * who might be on another CPU (e.g. two uniprocessor guests communicating + * via event channels and grant tables). So we need a variant of the bit + * ops which are SMP safe even on a UP kernel. 
+ */ + +#define sync_set_bit(nr, p) set_bit(nr, p) +#define sync_clear_bit(nr, p) clear_bit(nr, p) +#define sync_change_bit(nr, p) change_bit(nr, p) +#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) +#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) +#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) +#define sync_test_bit(nr, addr) test_bit(nr, addr) +#define sync_cmpxchg cmpxchg + +#endif diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h index b24a31a7e2c9..81a076eb37fa 100644 --- a/arch/arm64/include/asm/timex.h +++ b/arch/arm64/include/asm/timex.h @@ -16,14 +16,14 @@ #ifndef __ASM_TIMEX_H #define __ASM_TIMEX_H +#include <asm/arch_timer.h> + /* * Use the current timer as a cycle counter since this is what we use for * the delay loop. */ -#define get_cycles() ({ cycles_t c; read_current_timer(&c); c; }) +#define get_cycles() arch_counter_get_cntvct() #include <asm-generic/timex.h> -#define ARCH_HAS_READ_CURRENT_TIMER - #endif diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 654f0968030b..46b3beb4b773 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -187,4 +187,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, #define tlb_migrate_finish(mm) do { } while (0) +static inline void +tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) +{ + tlb_add_flush(tlb, addr); +} + #endif diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 122d6320f745..8b482035cfc2 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -117,6 +117,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, dsb(); } +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + #endif #endif diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h new file mode 100644 index 000000000000..86553213c132 --- /dev/null +++ b/arch/arm64/include/asm/xen/events.h @@ -0,0 +1,21 @@ +#ifndef _ASM_ARM64_XEN_EVENTS_H +#define _ASM_ARM64_XEN_EVENTS_H + +#include <asm/ptrace.h> +#include <asm/atomic.h> + +enum ipi_vector { + XEN_PLACEHOLDER_VECTOR, + + /* Xen IPIs go here */ + XEN_NR_IPIS, +}; + +static inline int xen_irqs_disabled(struct pt_regs *regs) +{ + return raw_irqs_disabled_flags((unsigned long) regs->pstate); +} + +#define xchg_xen_ulong(ptr, val) xchg((ptr), (val)) + +#endif /* _ASM_ARM64_XEN_EVENTS_H */ diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h new file mode 100644 index 000000000000..74b0c423ff5b --- /dev/null +++ b/arch/arm64/include/asm/xen/hypercall.h @@ -0,0 +1 @@ +#include <../../arm/include/asm/xen/hypercall.h> diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h new file mode 100644 index 000000000000..f263da8e8769 --- /dev/null +++ b/arch/arm64/include/asm/xen/hypervisor.h @@ -0,0 +1 @@ +#include <../../arm/include/asm/xen/hypervisor.h> diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h new file mode 100644 index 000000000000..44457aebeed4 --- /dev/null +++ b/arch/arm64/include/asm/xen/interface.h @@ -0,0 +1 @@ +#include <../../arm/include/asm/xen/interface.h> diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h new file mode 100644 index 000000000000..bed87ec36780 --- /dev/null +++ b/arch/arm64/include/asm/xen/page.h @@ -0,0 +1 @@ +#include 
<../../arm/include/asm/xen/page.h> diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..5031f4263937 --- /dev/null +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/include/uapi/asm/kvm.h: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM_KVM_H__ +#define __ARM_KVM_H__ + +#define KVM_SPSR_EL1 0 +#define KVM_SPSR_SVC KVM_SPSR_EL1 +#define KVM_SPSR_ABT 1 +#define KVM_SPSR_UND 2 +#define KVM_SPSR_IRQ 3 +#define KVM_SPSR_FIQ 4 +#define KVM_NR_SPSR 5 + +#ifndef __ASSEMBLY__ +#include <asm/types.h> +#include <asm/ptrace.h> + +#define __KVM_HAVE_GUEST_DEBUG +#define __KVM_HAVE_IRQ_LINE + +#define KVM_REG_SIZE(id) \ + (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + +struct kvm_regs { + struct user_pt_regs regs; /* sp = sp_el0 */ + + __u64 sp_el1; + __u64 elr_el1; + + __u64 spsr[KVM_NR_SPSR]; + + struct user_fpsimd_state fp_regs; +}; + +/* Supported Processor Types */ +#define KVM_ARM_TARGET_AEM_V8 0 +#define KVM_ARM_TARGET_FOUNDATION_V8 1 +#define KVM_ARM_TARGET_CORTEX_A57 2 + +#define KVM_ARM_NUM_TARGETS 3 + +/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ +#define KVM_ARM_DEVICE_TYPE_SHIFT 0 +#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) +#define KVM_ARM_DEVICE_ID_SHIFT 16 +#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) + +/* Supported device IDs */ +#define KVM_ARM_DEVICE_VGIC_V2 0 + +/* Supported VGIC address types */ +#define KVM_VGIC_V2_ADDR_TYPE_DIST 0 +#define KVM_VGIC_V2_ADDR_TYPE_CPU 1 + +#define KVM_VGIC_V2_DIST_SIZE 0x1000 +#define KVM_VGIC_V2_CPU_SIZE 0x2000 + +#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ +#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ + +struct kvm_vcpu_init { + __u32 target; + __u32 features[7]; +}; + +struct kvm_sregs { +}; + +struct kvm_fpu { +}; + +struct kvm_guest_debug_arch { +}; + +struct kvm_debug_exit_arch { +}; + +struct kvm_sync_regs { +}; + +struct kvm_arch_memory_slot { +}; + +/* If you need to interpret the index values, here is the key: */ +#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 +#define KVM_REG_ARM_COPROC_SHIFT 16 + +/* Normal registers are mapped as coprocessor 16. */ +#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / sizeof(__u32)) + +/* Some registers need more space to represent values. 
*/ +#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 +#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 +#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) +#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF +#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 + +/* AArch64 system registers */ +#define KVM_REG_ARM64_SYSREG (0x0013 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000 +#define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14 +#define KVM_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800 +#define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11 +#define KVM_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780 +#define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7 +#define KVM_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078 +#define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3 +#define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007 +#define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0 + +/* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_TYPE_SHIFT 24 +#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_VCPU_SHIFT 16 +#define KVM_ARM_IRQ_VCPU_MASK 0xff +#define KVM_ARM_IRQ_NUM_SHIFT 0 +#define KVM_ARM_IRQ_NUM_MASK 0xffff + +/* irq_type field */ +#define KVM_ARM_IRQ_TYPE_CPU 0 +#define KVM_ARM_IRQ_TYPE_SPI 1 +#define KVM_ARM_IRQ_TYPE_PPI 2 + +/* out-of-kernel GIC cpu interrupt injection irq_number field */ +#define KVM_ARM_IRQ_CPU_IRQ 0 +#define KVM_ARM_IRQ_CPU_FIQ 1 + +/* Highest supported SPI, from VGIC_NR_IRQS */ +#define KVM_ARM_IRQ_GIC_MAX 127 + +/* PSCI interface */ +#define KVM_PSCI_FN_BASE 0x95c1ba5e +#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) + +#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) +#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) +#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) +#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) + +#define KVM_PSCI_RET_SUCCESS 0 +#define KVM_PSCI_RET_NI ((unsigned long)-1) +#define KVM_PSCI_RET_INVAL ((unsigned long)-2) +#define KVM_PSCI_RET_DENIED ((unsigned long)-3) + +#endif + +#endif /* __ARM_KVM_H__ */ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index a2a4d810bea3..666e231d410b 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -21,6 +21,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> +#include <linux/kvm_host.h> #include <asm/thread_info.h> #include <asm/memory.h> #include <asm/cputable.h> @@ -104,5 +105,38 @@ int main(void) BLANK(); DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); + BLANK(); +#ifdef CONFIG_KVM_ARM_HOST + DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); + DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); + DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); + DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); + DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1)); + DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1)); + DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr)); + DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs)); + DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); + DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); + DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); + DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); + DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); + DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + 
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); + DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); + DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); + DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); + DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); + DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); + DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); + DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); + DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); + DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); + DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); + DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); + DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); + DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); + DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); + DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); +#endif return 0; } diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index f4726dc054b3..08018e3df580 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -24,6 +24,7 @@ #include <linux/init.h> #include <linux/ptrace.h> #include <linux/stat.h> +#include <linux/uaccess.h> #include <asm/debug-monitors.h> #include <asm/local.h> @@ -226,13 +227,74 @@ static int single_step_handler(unsigned long addr, unsigned int esr, return 0; } -static int __init single_step_init(void) +static int brk_handler(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + siginfo_t info; + + if (!user_mode(regs)) + return -EFAULT; + + info = (siginfo_t) { + .si_signo = SIGTRAP, + .si_errno = 0, + .si_code = TRAP_BRKPT, + .si_addr = (void __user *)instruction_pointer(regs), + }; + + force_sig_info(SIGTRAP, &info, current); + return 0; +} + +int aarch32_break_handler(struct pt_regs *regs) +{ + siginfo_t info; + unsigned int instr; + bool bp = false; + void __user *pc = (void __user *)instruction_pointer(regs); + + if (!compat_user_mode(regs)) + return -EFAULT; + + if (compat_thumb_mode(regs)) { + /* get 16-bit Thumb instruction */ + get_user(instr, (u16 __user *)pc); + if (instr == AARCH32_BREAK_THUMB2_LO) { + /* get second half of 32-bit Thumb-2 instruction */ + get_user(instr, (u16 __user *)(pc + 2)); + bp = instr == AARCH32_BREAK_THUMB2_HI; + } else { + bp = instr == AARCH32_BREAK_THUMB; + } + } else { + /* 32-bit ARM instruction */ + get_user(instr, (u32 __user *)pc); + bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; + } + + if (!bp) + return -EFAULT; + + info = (siginfo_t) { + .si_signo = SIGTRAP, + .si_errno = 0, + .si_code = TRAP_BRKPT, + .si_addr = pc, + }; + + force_sig_info(SIGTRAP, &info, current); + return 0; +} + +static int __init debug_traps_init(void) { hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP, TRAP_HWBKPT, "single-step handler"); + hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP, + TRAP_BRKPT, "ptrace BRK handler"); return 0; } -arch_initcall(single_step_init); +arch_initcall(debug_traps_init); /* Re-enable single step for syscall restarting. 
*/ void user_rewind_single_step(struct task_struct *task) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6e1e77f1831c..fecdbf7de82e 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -53,28 +53,6 @@ void ptrace_disable(struct task_struct *child) { } -/* - * Handle hitting a breakpoint. - */ -static int ptrace_break(struct pt_regs *regs) -{ - siginfo_t info = { - .si_signo = SIGTRAP, - .si_errno = 0, - .si_code = TRAP_BRKPT, - .si_addr = (void __user *)instruction_pointer(regs), - }; - - force_sig_info(SIGTRAP, &info, current); - return 0; -} - -static int arm64_break_trap(unsigned long addr, unsigned int esr, - struct pt_regs *regs) -{ - return ptrace_break(regs); -} - #ifdef CONFIG_HAVE_HW_BREAKPOINT /* * Handle hitting a HW-breakpoint. @@ -817,33 +795,6 @@ static const struct user_regset_view user_aarch32_view = { .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) }; -int aarch32_break_trap(struct pt_regs *regs) -{ - unsigned int instr; - bool bp = false; - void __user *pc = (void __user *)instruction_pointer(regs); - - if (compat_thumb_mode(regs)) { - /* get 16-bit Thumb instruction */ - get_user(instr, (u16 __user *)pc); - if (instr == AARCH32_BREAK_THUMB2_LO) { - /* get second half of 32-bit Thumb-2 instruction */ - get_user(instr, (u16 __user *)(pc + 2)); - bp = instr == AARCH32_BREAK_THUMB2_HI; - } else { - bp = instr == AARCH32_BREAK_THUMB; - } - } else { - /* 32-bit ARM instruction */ - get_user(instr, (u32 __user *)pc); - bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; - } - - if (bp) - return ptrace_break(regs); - return 1; -} - static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, compat_ulong_t __user *ret) { @@ -1111,16 +1062,6 @@ long arch_ptrace(struct task_struct *child, long request, return ptrace_request(child, request, addr, data); } - -static int __init ptrace_break_init(void) -{ - hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP, - TRAP_BRKPT, "ptrace BRK handler"); - return 0; -} -core_initcall(ptrace_break_init); - - asmlinkage int syscall_trace(int dir, struct pt_regs *regs) { unsigned long saved_reg; diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index a551f88ae2c1..03dc3718eb13 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -68,12 +68,6 @@ unsigned long long notrace sched_clock(void) return arch_timer_read_counter() * sched_clock_mult; } -int read_current_timer(unsigned long *timer_value) -{ - *timer_value = arch_timer_read_counter(); - return 0; -} - void __init time_init(void) { u32 arch_timer_rate; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index f30852d28590..7ffadddb645d 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -32,6 +32,7 @@ #include <linux/syscalls.h> #include <asm/atomic.h> +#include <asm/debug-monitors.h> #include <asm/traps.h> #include <asm/stacktrace.h> #include <asm/exception.h> @@ -261,11 +262,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) siginfo_t info; void __user *pc = (void __user *)instruction_pointer(regs); -#ifdef CONFIG_COMPAT /* check for AArch32 breakpoint instructions */ - if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0) + if (!aarch32_break_handler(regs)) return; -#endif if (show_unhandled_signals && unhandled_signal(current, SIGILL) && printk_ratelimit()) { diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 3fae2be8b016..f5e55747242f 100644 --- 
a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -17,6 +17,19 @@ ENTRY(stext) jiffies = jiffies_64; +#define HYPERVISOR_TEXT \ + /* \ + * Force the alignment to be compatible with \ + * the vectors requirements \ + */ \ + . = ALIGN(2048); \ + VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ + *(.hyp.idmap.text) \ + VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ + VMLINUX_SYMBOL(__hyp_text_start) = .; \ + *(.hyp.text) \ + VMLINUX_SYMBOL(__hyp_text_end) = .; + SECTIONS { /* @@ -49,6 +62,7 @@ SECTIONS TEXT_TEXT SCHED_TEXT LOCK_TEXT + HYPERVISOR_TEXT *(.fixup) *(.gnu.warning) . = ALIGN(16); @@ -56,7 +70,7 @@ SECTIONS } RO_DATA(PAGE_SIZE) - + EXCEPTION_TABLE(8) _etext = .; /* End of text and rodata section */ . = ALIGN(PAGE_SIZE); @@ -99,14 +113,6 @@ SECTIONS READ_MOSTLY_DATA(64) /* - * The exception fixup table (might need resorting at runtime) - */ - . = ALIGN(32); - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - - /* * and the usual data section */ DATA_DATA @@ -124,3 +130,9 @@ SECTIONS STABS_DEBUG .comment 0 : { *(.comment) } } + +/* + * The HYP init code can't be more than a page long. + */ +ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), + "HYP init code too big") diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig new file mode 100644 index 000000000000..21e90820bd23 --- /dev/null +++ b/arch/arm64/kvm/Kconfig @@ -0,0 +1,51 @@ +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + ---help--- + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. + +if VIRTUALIZATION + +config KVM + bool "Kernel-based Virtual Machine (KVM) support" + select MMU_NOTIFIER + select PREEMPT_NOTIFIERS + select ANON_INODES + select KVM_MMIO + select KVM_ARM_HOST + select KVM_ARM_VGIC + select KVM_ARM_TIMER + ---help--- + Support hosting virtualized guest machines. + + If unsure, say N. + +config KVM_ARM_HOST + bool + ---help--- + Provides host support for ARM processors. + +config KVM_ARM_VGIC + bool + depends on KVM_ARM_HOST && OF + select HAVE_KVM_IRQCHIP + ---help--- + Adds support for a hardware assisted, in-kernel GIC emulation. + +config KVM_ARM_TIMER + bool + depends on KVM_ARM_VGIC + ---help--- + Adds support for the Architected Timers in virtual machines. + +endif # VIRTUALIZATION diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile new file mode 100644 index 000000000000..72a9fd583ad3 --- /dev/null +++ b/arch/arm64/kvm/Makefile @@ -0,0 +1,23 @@ +# +# Makefile for Kernel-based Virtual Machine module +# + +ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm +CFLAGS_arm.o := -I. +CFLAGS_mmu.o := -I. 
+ +KVM=../../../virt/kvm +ARM=../../../arch/arm/kvm + +obj-$(CONFIG_KVM_ARM_HOST) += kvm.o + +kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o +kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o + +kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o +kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o +kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o + +kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o +kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c new file mode 100644 index 000000000000..124418d17049 --- /dev/null +++ b/arch/arm64/kvm/emulate.c @@ -0,0 +1,158 @@ +/* + * (not much of an) Emulation layer for 32bit guests. + * + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * based on arch/arm/kvm/emulate.c + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_emulate.h> + +/* + * stolen from arch/arm/kernel/opcodes.c + * + * condition code lookup table + * index into the table is test code: EQ, NE, ... LT, GT, AL, NV + * + * bit position in short is condition code: NZCV + */ +static const unsigned short cc_map[16] = { + 0xF0F0, /* EQ == Z set */ + 0x0F0F, /* NE */ + 0xCCCC, /* CS == C set */ + 0x3333, /* CC */ + 0xFF00, /* MI == N set */ + 0x00FF, /* PL */ + 0xAAAA, /* VS == V set */ + 0x5555, /* VC */ + 0x0C0C, /* HI == C set && Z clear */ + 0xF3F3, /* LS == C clear || Z set */ + 0xAA55, /* GE == (N==V) */ + 0x55AA, /* LT == (N!=V) */ + 0x0A05, /* GT == (!Z && (N==V)) */ + 0xF5FA, /* LE == (Z || (N!=V)) */ + 0xFFFF, /* AL always */ + 0 /* NV */ +}; + +static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) +{ + u32 esr = kvm_vcpu_get_hsr(vcpu); + + if (esr & ESR_EL2_CV) + return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT; + + return -1; +} + +/* + * Check if a trapped instruction should have been executed or not. + */ +bool kvm_condition_valid32(const struct kvm_vcpu *vcpu) +{ + unsigned long cpsr; + u32 cpsr_cond; + int cond; + + /* Top two bits non-zero? Unconditional. */ + if (kvm_vcpu_get_hsr(vcpu) >> 30) + return true; + + /* Is condition field valid? */ + cond = kvm_vcpu_get_condition(vcpu); + if (cond == 0xE) + return true; + + cpsr = *vcpu_cpsr(vcpu); + + if (cond < 0) { + /* This can happen in Thumb mode: examine IT state. */ + unsigned long it; + + it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); + + /* it == 0 => unconditional. */ + if (it == 0) + return true; + + /* The cond for this insn works out as the top 4 bits. 
*/ + cond = (it >> 4); + } + + cpsr_cond = cpsr >> 28; + + if (!((cc_map[cond] >> cpsr_cond) & 1)) + return false; + + return true; +} + +/** + * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block + * @vcpu: The VCPU pointer + * + * When exceptions occur while instructions are executed in Thumb IF-THEN + * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have + * to do this little bit of work manually. The fields map like this: + * + * IT[7:0] -> CPSR[26:25],CPSR[15:10] + */ +static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) +{ + unsigned long itbits, cond; + unsigned long cpsr = *vcpu_cpsr(vcpu); + bool is_arm = !(cpsr & COMPAT_PSR_T_BIT); + + BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK)); + + if (!(cpsr & COMPAT_PSR_IT_MASK)) + return; + + cond = (cpsr & 0xe000) >> 13; + itbits = (cpsr & 0x1c00) >> (10 - 2); + itbits |= (cpsr & (0x3 << 25)) >> 25; + + /* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */ + if ((itbits & 0x7) == 0) + itbits = cond = 0; + else + itbits = (itbits << 1) & 0x1f; + + cpsr &= ~COMPAT_PSR_IT_MASK; + cpsr |= cond << 13; + cpsr |= (itbits & 0x1c) << (10 - 2); + cpsr |= (itbits & 0x3) << 25; + *vcpu_cpsr(vcpu) = cpsr; +} + +/** + * kvm_skip_instr - skip a trapped instruction and proceed to the next + * @vcpu: The vcpu pointer + */ +void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) +{ + bool is_thumb; + + is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT); + if (is_thumb && !is_wide_instr) + *vcpu_pc(vcpu) += 2; + else + *vcpu_pc(vcpu) += 4; + kvm_adjust_itstate(vcpu); +} diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c new file mode 100644 index 000000000000..2c3ff67a8ecb --- /dev/null +++ b/arch/arm64/kvm/guest.c @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/kvm/guest.c: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <asm/cputype.h> +#include <asm/uaccess.h> +#include <asm/kvm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_coproc.h> + +struct kvm_stats_debugfs_item debugfs_entries[] = { + { NULL } +}; + +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; + return 0; +} + +static u64 core_reg_offset_from_id(u64 id) +{ + return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); +} + +static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + /* + * Because the kvm_regs structure is a mix of 32, 64 and + * 128bit fields, we index it as if it was a 32bit + * array. Hence below, nr_regs is the number of entries, and + * off the index in the "array". 
+ */ + __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr; + struct kvm_regs *regs = vcpu_gp_regs(vcpu); + int nr_regs = sizeof(*regs) / sizeof(__u32); + u32 off; + + /* Our ID is an index into the kvm_regs struct. */ + off = core_reg_offset_from_id(reg->id); + if (off >= nr_regs || + (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) + return -ENOENT; + + if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + +static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr; + struct kvm_regs *regs = vcpu_gp_regs(vcpu); + int nr_regs = sizeof(*regs) / sizeof(__u32); + __uint128_t tmp; + void *valp = &tmp; + u64 off; + int err = 0; + + /* Our ID is an index into the kvm_regs struct. */ + off = core_reg_offset_from_id(reg->id); + if (off >= nr_regs || + (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) + return -ENOENT; + + if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) + return -EINVAL; + + if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) { + err = -EFAULT; + goto out; + } + + if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) { + u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK; + switch (mode) { + case COMPAT_PSR_MODE_USR: + case COMPAT_PSR_MODE_FIQ: + case COMPAT_PSR_MODE_IRQ: + case COMPAT_PSR_MODE_SVC: + case COMPAT_PSR_MODE_ABT: + case COMPAT_PSR_MODE_UND: + case PSR_MODE_EL0t: + case PSR_MODE_EL1t: + case PSR_MODE_EL1h: + break; + default: + err = -EINVAL; + goto out; + } + } + + memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id)); +out: + return err; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + return -EINVAL; +} + +static unsigned long num_core_regs(void) +{ + return sizeof(struct kvm_regs) / sizeof(__u32); +} + +/** + * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG + * + * This is for all registers. + */ +unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) +{ + return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu); +} + +/** + * kvm_arm_copy_reg_indices - get indices of all registers. + * + * We do core registers right here, then we apppend system regs. + */ +int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + unsigned int i; + const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; + + for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { + if (put_user(core_reg | i, uindices)) + return -EFAULT; + uindices++; + } + + return kvm_arm_copy_sys_reg_indices(vcpu, uindices); +} + +int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + /* We currently use nothing arch-specific in upper 32 bits */ + if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) + return -EINVAL; + + /* Register group 16 means we want a core register. */ + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) + return get_core_reg(vcpu, reg); + + return kvm_arm_sys_reg_get_reg(vcpu, reg); +} + +int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + /* We currently use nothing arch-specific in upper 32 bits */ + if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) + return -EINVAL; + + /* Register group 16 means we set a core register. 
*/ + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) + return set_core_reg(vcpu, reg); + + return kvm_arm_sys_reg_set_reg(vcpu, reg); +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + return -EINVAL; +} + +int __attribute_const__ kvm_target_cpu(void) +{ + unsigned long implementor = read_cpuid_implementor(); + unsigned long part_number = read_cpuid_part_number(); + + if (implementor != ARM_CPU_IMP_ARM) + return -EINVAL; + + switch (part_number) { + case ARM_CPU_PART_AEM_V8: + return KVM_ARM_TARGET_AEM_V8; + case ARM_CPU_PART_FOUNDATION: + return KVM_ARM_TARGET_FOUNDATION_V8; + case ARM_CPU_PART_CORTEX_A57: + /* Currently handled by the generic backend */ + return KVM_ARM_TARGET_CORTEX_A57; + default: + return -EINVAL; + } +} + +int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, + const struct kvm_vcpu_init *init) +{ + unsigned int i; + int phys_target = kvm_target_cpu(); + + if (init->target != phys_target) + return -EINVAL; + + vcpu->arch.target = phys_target; + bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); + + /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ + for (i = 0; i < sizeof(init->features) * 8; i++) { + if (init->features[i / 32] & (1 << (i % 32))) { + if (i >= KVM_VCPU_MAX_FEATURES) + return -ENOENT; + set_bit(i, vcpu->arch.features); + } + } + + /* Now we know what it is, we can reset it. */ + return kvm_reset_vcpu(vcpu); +} + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; +} diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c new file mode 100644 index 000000000000..9beaca033437 --- /dev/null +++ b/arch/arm64/kvm/handle_exit.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/kvm/handle_exit.c: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_psci.h>
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+        if (kvm_psci_call(vcpu))
+                return 1;
+
+        kvm_inject_undefined(vcpu);
+        return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+        if (kvm_psci_call(vcpu))
+                return 1;
+
+        kvm_inject_undefined(vcpu);
+        return 1;
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu: the vcpu pointer
+ *
+ * Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
+ */
+static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+        kvm_vcpu_block(vcpu);
+        return 1;
+}
+
+static exit_handle_fn arm_exit_handlers[] = {
+        [ESR_EL2_EC_WFI] = kvm_handle_wfi,
+        [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
+        [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
+        [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access,
+        [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
+        [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access,
+        [ESR_EL2_EC_HVC32] = handle_hvc,
+        [ESR_EL2_EC_SMC32] = handle_smc,
+        [ESR_EL2_EC_HVC64] = handle_hvc,
+        [ESR_EL2_EC_SMC64] = handle_smc,
+        [ESR_EL2_EC_SYS64] = kvm_handle_sys_reg,
+        [ESR_EL2_EC_IABT] = kvm_handle_guest_abort,
+        [ESR_EL2_EC_DABT] = kvm_handle_guest_abort,
+};
+
+static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+        u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+        if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+            !arm_exit_handlers[hsr_ec]) {
+                kvm_err("Unknown exception class: hsr: %#08x\n",
+                        (unsigned int)kvm_vcpu_get_hsr(vcpu));
+                BUG();
+        }
+
+        return arm_exit_handlers[hsr_ec];
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ */
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                int exception_index)
+{
+        exit_handle_fn exit_handler;
+
+        switch (exception_index) {
+        case ARM_EXCEPTION_IRQ:
+                return 1;
+        case ARM_EXCEPTION_TRAP:
+                /*
+                 * See ARM ARM B1.14.1: "Hyp traps on instructions
+                 * that fail their condition code check"
+                 */
+                if (!kvm_condition_valid(vcpu)) {
+                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+                        return 1;
+                }
+
+                exit_handler = kvm_get_exit_handler(vcpu);
+
+                return exit_handler(vcpu, run);
+        default:
+                kvm_pr_unimpl("Unsupported exception type: %d",
+                              exception_index);
+                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                return 0;
+        }
+} diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
new file mode 100644
index 000000000000..ba84e6705e20
--- /dev/null
+++ b/arch/arm64/kvm/hyp-init.S
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/linkage.h> + +#include <asm/assembler.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmu.h> + + .text + .pushsection .hyp.idmap.text, "ax" + + .align 11 + +ENTRY(__kvm_hyp_init) + ventry __invalid // Synchronous EL2t + ventry __invalid // IRQ EL2t + ventry __invalid // FIQ EL2t + ventry __invalid // Error EL2t + + ventry __invalid // Synchronous EL2h + ventry __invalid // IRQ EL2h + ventry __invalid // FIQ EL2h + ventry __invalid // Error EL2h + + ventry __do_hyp_init // Synchronous 64-bit EL1 + ventry __invalid // IRQ 64-bit EL1 + ventry __invalid // FIQ 64-bit EL1 + ventry __invalid // Error 64-bit EL1 + + ventry __invalid // Synchronous 32-bit EL1 + ventry __invalid // IRQ 32-bit EL1 + ventry __invalid // FIQ 32-bit EL1 + ventry __invalid // Error 32-bit EL1 + +__invalid: + b . + + /* + * x0: HYP boot pgd + * x1: HYP pgd + * x2: HYP stack + * x3: HYP vectors + */ +__do_hyp_init: + + msr ttbr0_el2, x0 + + mrs x4, tcr_el1 + ldr x5, =TCR_EL2_MASK + and x4, x4, x5 + ldr x5, =TCR_EL2_FLAGS + orr x4, x4, x5 + msr tcr_el2, x4 + + ldr x4, =VTCR_EL2_FLAGS + msr vtcr_el2, x4 + + mrs x4, mair_el1 + msr mair_el2, x4 + isb + + mov x4, #SCTLR_EL2_FLAGS + msr sctlr_el2, x4 + isb + + /* MMU is now enabled. Get ready for the trampoline dance */ + ldr x4, =TRAMPOLINE_VA + adr x5, target + bfi x4, x5, #0, #PAGE_SHIFT + br x4 + +target: /* We're now in the trampoline code, switch page tables */ + msr ttbr0_el2, x1 + isb + + /* Invalidate the old TLBs */ + tlbi alle2 + dsb sy + + /* Set the stack and new vectors */ + kern_hyp_va x2 + mov sp, x2 + kern_hyp_va x3 + msr vbar_el2, x3 + + /* Hello, World! */ + eret +ENDPROC(__kvm_hyp_init) + + .ltorg + + .popsection diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S new file mode 100644 index 000000000000..ff985e3d8b72 --- /dev/null +++ b/arch/arm64/kvm/hyp.S @@ -0,0 +1,831 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/linkage.h> +#include <linux/irqchip/arm-gic.h> + +#include <asm/assembler.h> +#include <asm/memory.h> +#include <asm/asm-offsets.h> +#include <asm/fpsimdmacros.h> +#include <asm/kvm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmu.h> + +#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) +#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) +#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x) +#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x) + + .text + .pushsection .hyp.text, "ax" + .align PAGE_SHIFT + +__kvm_hyp_code_start: + .globl __kvm_hyp_code_start + +.macro save_common_regs + // x2: base address for cpu context + // x3: tmp register + + add x3, x2, #CPU_XREG_OFFSET(19) + stp x19, x20, [x3] + stp x21, x22, [x3, #16] + stp x23, x24, [x3, #32] + stp x25, x26, [x3, #48] + stp x27, x28, [x3, #64] + stp x29, lr, [x3, #80] + + mrs x19, sp_el0 + mrs x20, elr_el2 // EL1 PC + mrs x21, spsr_el2 // EL1 pstate + + stp x19, x20, [x3, #96] + str x21, [x3, #112] + + mrs x22, sp_el1 + mrs x23, elr_el1 + mrs x24, spsr_el1 + + str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] + str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] + str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] +.endm + +.macro restore_common_regs + // x2: base address for cpu context + // x3: tmp register + + ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)] + ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)] + ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)] + + msr sp_el1, x22 + msr elr_el1, x23 + msr spsr_el1, x24 + + add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0 + ldp x19, x20, [x3] + ldr x21, [x3, #16] + + msr sp_el0, x19 + msr elr_el2, x20 // EL1 PC + msr spsr_el2, x21 // EL1 pstate + + add x3, x2, #CPU_XREG_OFFSET(19) + ldp x19, x20, [x3] + ldp x21, x22, [x3, #16] + ldp x23, x24, [x3, #32] + ldp x25, x26, [x3, #48] + ldp x27, x28, [x3, #64] + ldp x29, lr, [x3, #80] +.endm + +.macro save_host_regs + save_common_regs +.endm + +.macro restore_host_regs + restore_common_regs +.endm + +.macro save_fpsimd + // x2: cpu context address + // x3, x4: tmp regs + add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) + fpsimd_save x3, 4 +.endm + +.macro restore_fpsimd + // x2: cpu context address + // x3, x4: tmp regs + add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS) + fpsimd_restore x3, 4 +.endm + +.macro save_guest_regs + // x0 is the vcpu address + // x1 is the return code, do not corrupt! + // x2 is the cpu context + // x3 is a tmp register + // Guest's x0-x3 are on the stack + + // Compute base to save registers + add x3, x2, #CPU_XREG_OFFSET(4) + stp x4, x5, [x3] + stp x6, x7, [x3, #16] + stp x8, x9, [x3, #32] + stp x10, x11, [x3, #48] + stp x12, x13, [x3, #64] + stp x14, x15, [x3, #80] + stp x16, x17, [x3, #96] + str x18, [x3, #112] + + pop x6, x7 // x2, x3 + pop x4, x5 // x0, x1 + + add x3, x2, #CPU_XREG_OFFSET(0) + stp x4, x5, [x3] + stp x6, x7, [x3, #16] + + save_common_regs +.endm + +.macro restore_guest_regs + // x0 is the vcpu address. 
+ // x2 is the cpu context + // x3 is a tmp register + + // Prepare x0-x3 for later restore + add x3, x2, #CPU_XREG_OFFSET(0) + ldp x4, x5, [x3] + ldp x6, x7, [x3, #16] + push x4, x5 // Push x0-x3 on the stack + push x6, x7 + + // x4-x18 + ldp x4, x5, [x3, #32] + ldp x6, x7, [x3, #48] + ldp x8, x9, [x3, #64] + ldp x10, x11, [x3, #80] + ldp x12, x13, [x3, #96] + ldp x14, x15, [x3, #112] + ldp x16, x17, [x3, #128] + ldr x18, [x3, #144] + + // x19-x29, lr, sp*, elr*, spsr* + restore_common_regs + + // Last bits of the 64bit state + pop x2, x3 + pop x0, x1 + + // Do not touch any register after this! +.endm + +/* + * Macros to perform system register save/restore. + * + * Ordering here is absolutely critical, and must be kept consistent + * in {save,restore}_sysregs, {save,restore}_guest_32bit_state, + * and in kvm_asm.h. + * + * In other words, don't touch any of these unless you know what + * you are doing. + */ +.macro save_sysregs + // x2: base address for cpu context + // x3: tmp register + + add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1) + + mrs x4, vmpidr_el2 + mrs x5, csselr_el1 + mrs x6, sctlr_el1 + mrs x7, actlr_el1 + mrs x8, cpacr_el1 + mrs x9, ttbr0_el1 + mrs x10, ttbr1_el1 + mrs x11, tcr_el1 + mrs x12, esr_el1 + mrs x13, afsr0_el1 + mrs x14, afsr1_el1 + mrs x15, far_el1 + mrs x16, mair_el1 + mrs x17, vbar_el1 + mrs x18, contextidr_el1 + mrs x19, tpidr_el0 + mrs x20, tpidrro_el0 + mrs x21, tpidr_el1 + mrs x22, amair_el1 + mrs x23, cntkctl_el1 + + stp x4, x5, [x3] + stp x6, x7, [x3, #16] + stp x8, x9, [x3, #32] + stp x10, x11, [x3, #48] + stp x12, x13, [x3, #64] + stp x14, x15, [x3, #80] + stp x16, x17, [x3, #96] + stp x18, x19, [x3, #112] + stp x20, x21, [x3, #128] + stp x22, x23, [x3, #144] +.endm + +.macro restore_sysregs + // x2: base address for cpu context + // x3: tmp register + + add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1) + + ldp x4, x5, [x3] + ldp x6, x7, [x3, #16] + ldp x8, x9, [x3, #32] + ldp x10, x11, [x3, #48] + ldp x12, x13, [x3, #64] + ldp x14, x15, [x3, #80] + ldp x16, x17, [x3, #96] + ldp x18, x19, [x3, #112] + ldp x20, x21, [x3, #128] + ldp x22, x23, [x3, #144] + + msr vmpidr_el2, x4 + msr csselr_el1, x5 + msr sctlr_el1, x6 + msr actlr_el1, x7 + msr cpacr_el1, x8 + msr ttbr0_el1, x9 + msr ttbr1_el1, x10 + msr tcr_el1, x11 + msr esr_el1, x12 + msr afsr0_el1, x13 + msr afsr1_el1, x14 + msr far_el1, x15 + msr mair_el1, x16 + msr vbar_el1, x17 + msr contextidr_el1, x18 + msr tpidr_el0, x19 + msr tpidrro_el0, x20 + msr tpidr_el1, x21 + msr amair_el1, x22 + msr cntkctl_el1, x23 +.endm + +.macro skip_32bit_state tmp, target + // Skip 32bit state if not needed + mrs \tmp, hcr_el2 + tbnz \tmp, #HCR_RW_SHIFT, \target +.endm + +.macro skip_tee_state tmp, target + // Skip ThumbEE state if not needed + mrs \tmp, id_pfr0_el1 + tbz \tmp, #12, \target +.endm + +.macro save_guest_32bit_state + skip_32bit_state x3, 1f + + add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) + mrs x4, spsr_abt + mrs x5, spsr_und + mrs x6, spsr_irq + mrs x7, spsr_fiq + stp x4, x5, [x3] + stp x6, x7, [x3, #16] + + add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) + mrs x4, dacr32_el2 + mrs x5, ifsr32_el2 + mrs x6, fpexc32_el2 + mrs x7, dbgvcr32_el2 + stp x4, x5, [x3] + stp x6, x7, [x3, #16] + + skip_tee_state x8, 1f + + add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) + mrs x4, teecr32_el1 + mrs x5, teehbr32_el1 + stp x4, x5, [x3] +1: +.endm + +.macro restore_guest_32bit_state + skip_32bit_state x3, 1f + + add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT) + ldp x4, x5, [x3] + ldp x6, x7, [x3, #16] + msr spsr_abt, x4 + msr spsr_und, x5 + 
msr spsr_irq, x6 + msr spsr_fiq, x7 + + add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2) + ldp x4, x5, [x3] + ldp x6, x7, [x3, #16] + msr dacr32_el2, x4 + msr ifsr32_el2, x5 + msr fpexc32_el2, x6 + msr dbgvcr32_el2, x7 + + skip_tee_state x8, 1f + + add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1) + ldp x4, x5, [x3] + msr teecr32_el1, x4 + msr teehbr32_el1, x5 +1: +.endm + +.macro activate_traps + ldr x2, [x0, #VCPU_IRQ_LINES] + ldr x1, [x0, #VCPU_HCR_EL2] + orr x2, x2, x1 + msr hcr_el2, x2 + + ldr x2, =(CPTR_EL2_TTA) + msr cptr_el2, x2 + + ldr x2, =(1 << 15) // Trap CP15 Cr=15 + msr hstr_el2, x2 + + mrs x2, mdcr_el2 + and x2, x2, #MDCR_EL2_HPMN_MASK + orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR) + msr mdcr_el2, x2 +.endm + +.macro deactivate_traps + mov x2, #HCR_RW + msr hcr_el2, x2 + msr cptr_el2, xzr + msr hstr_el2, xzr + + mrs x2, mdcr_el2 + and x2, x2, #MDCR_EL2_HPMN_MASK + msr mdcr_el2, x2 +.endm + +.macro activate_vm + ldr x1, [x0, #VCPU_KVM] + kern_hyp_va x1 + ldr x2, [x1, #KVM_VTTBR] + msr vttbr_el2, x2 +.endm + +.macro deactivate_vm + msr vttbr_el2, xzr +.endm + +/* + * Save the VGIC CPU state into memory + * x0: Register pointing to VCPU struct + * Do not corrupt x1!!! + */ +.macro save_vgic_state + /* Get VGIC VCTRL base into x2 */ + ldr x2, [x0, #VCPU_KVM] + kern_hyp_va x2 + ldr x2, [x2, #KVM_VGIC_VCTRL] + kern_hyp_va x2 + cbz x2, 2f // disabled + + /* Compute the address of struct vgic_cpu */ + add x3, x0, #VCPU_VGIC_CPU + + /* Save all interesting registers */ + ldr w4, [x2, #GICH_HCR] + ldr w5, [x2, #GICH_VMCR] + ldr w6, [x2, #GICH_MISR] + ldr w7, [x2, #GICH_EISR0] + ldr w8, [x2, #GICH_EISR1] + ldr w9, [x2, #GICH_ELRSR0] + ldr w10, [x2, #GICH_ELRSR1] + ldr w11, [x2, #GICH_APR] + + str w4, [x3, #VGIC_CPU_HCR] + str w5, [x3, #VGIC_CPU_VMCR] + str w6, [x3, #VGIC_CPU_MISR] + str w7, [x3, #VGIC_CPU_EISR] + str w8, [x3, #(VGIC_CPU_EISR + 4)] + str w9, [x3, #VGIC_CPU_ELRSR] + str w10, [x3, #(VGIC_CPU_ELRSR + 4)] + str w11, [x3, #VGIC_CPU_APR] + + /* Clear GICH_HCR */ + str wzr, [x2, #GICH_HCR] + + /* Save list registers */ + add x2, x2, #GICH_LR0 + ldr w4, [x3, #VGIC_CPU_NR_LR] + add x3, x3, #VGIC_CPU_LR +1: ldr w5, [x2], #4 + str w5, [x3], #4 + sub w4, w4, #1 + cbnz w4, 1b +2: +.endm + +/* + * Restore the VGIC CPU state from memory + * x0: Register pointing to VCPU struct + */ +.macro restore_vgic_state + /* Get VGIC VCTRL base into x2 */ + ldr x2, [x0, #VCPU_KVM] + kern_hyp_va x2 + ldr x2, [x2, #KVM_VGIC_VCTRL] + kern_hyp_va x2 + cbz x2, 2f // disabled + + /* Compute the address of struct vgic_cpu */ + add x3, x0, #VCPU_VGIC_CPU + + /* We only restore a minimal set of registers */ + ldr w4, [x3, #VGIC_CPU_HCR] + ldr w5, [x3, #VGIC_CPU_VMCR] + ldr w6, [x3, #VGIC_CPU_APR] + + str w4, [x2, #GICH_HCR] + str w5, [x2, #GICH_VMCR] + str w6, [x2, #GICH_APR] + + /* Restore list registers */ + add x2, x2, #GICH_LR0 + ldr w4, [x3, #VGIC_CPU_NR_LR] + add x3, x3, #VGIC_CPU_LR +1: ldr w5, [x3], #4 + str w5, [x2], #4 + sub w4, w4, #1 + cbnz w4, 1b +2: +.endm + +.macro save_timer_state + // x0: vcpu pointer + ldr x2, [x0, #VCPU_KVM] + kern_hyp_va x2 + ldr w3, [x2, #KVM_TIMER_ENABLED] + cbz w3, 1f + + mrs x3, cntv_ctl_el0 + and x3, x3, #3 + str w3, [x0, #VCPU_TIMER_CNTV_CTL] + bic x3, x3, #1 // Clear Enable + msr cntv_ctl_el0, x3 + + isb + + mrs x3, cntv_cval_el0 + str x3, [x0, #VCPU_TIMER_CNTV_CVAL] + +1: + // Allow physical timer/counter access for the host + mrs x2, cnthctl_el2 + orr x2, x2, #3 + msr cnthctl_el2, x2 + + // Clear cntvoff for the host + msr cntvoff_el2, xzr +.endm + +.macro 
restore_timer_state + // x0: vcpu pointer + // Disallow physical timer access for the guest + // Physical counter access is allowed + mrs x2, cnthctl_el2 + orr x2, x2, #1 + bic x2, x2, #2 + msr cnthctl_el2, x2 + + ldr x2, [x0, #VCPU_KVM] + kern_hyp_va x2 + ldr w3, [x2, #KVM_TIMER_ENABLED] + cbz w3, 1f + + ldr x3, [x2, #KVM_TIMER_CNTVOFF] + msr cntvoff_el2, x3 + ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL] + msr cntv_cval_el0, x2 + isb + + ldr w2, [x0, #VCPU_TIMER_CNTV_CTL] + and x2, x2, #3 + msr cntv_ctl_el0, x2 +1: +.endm + +__save_sysregs: + save_sysregs + ret + +__restore_sysregs: + restore_sysregs + ret + +__save_fpsimd: + save_fpsimd + ret + +__restore_fpsimd: + restore_fpsimd + ret + +/* + * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu); + * + * This is the world switch. The first half of the function + * deals with entering the guest, and anything from __kvm_vcpu_return + * to the end of the function deals with reentering the host. + * On the enter path, only x0 (vcpu pointer) must be preserved until + * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception + * code) must both be preserved until the epilogue. + * In both cases, x2 points to the CPU context we're saving/restoring from/to. + */ +ENTRY(__kvm_vcpu_run) + kern_hyp_va x0 + msr tpidr_el2, x0 // Save the vcpu register + + // Host context + ldr x2, [x0, #VCPU_HOST_CONTEXT] + kern_hyp_va x2 + + save_host_regs + bl __save_fpsimd + bl __save_sysregs + + activate_traps + activate_vm + + restore_vgic_state + restore_timer_state + + // Guest context + add x2, x0, #VCPU_CONTEXT + + bl __restore_sysregs + bl __restore_fpsimd + restore_guest_32bit_state + restore_guest_regs + + // That's it, no more messing around. + eret + +__kvm_vcpu_return: + // Assume x0 is the vcpu pointer, x1 the return code + // Guest's x0-x3 are on the stack + + // Guest context + add x2, x0, #VCPU_CONTEXT + + save_guest_regs + bl __save_fpsimd + bl __save_sysregs + save_guest_32bit_state + + save_timer_state + save_vgic_state + + deactivate_traps + deactivate_vm + + // Host context + ldr x2, [x0, #VCPU_HOST_CONTEXT] + kern_hyp_va x2 + + bl __restore_sysregs + bl __restore_fpsimd + restore_host_regs + + mov x0, x1 + ret +END(__kvm_vcpu_run) + +// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); +ENTRY(__kvm_tlb_flush_vmid_ipa) + kern_hyp_va x0 + ldr x2, [x0, #KVM_VTTBR] + msr vttbr_el2, x2 + isb + + /* + * We could do so much better if we had the VA as well. + * Instead, we invalidate Stage-2 for this IPA, and the + * whole of Stage-1. Weep... + */ + tlbi ipas2e1is, x1 + dsb sy + tlbi vmalle1is + dsb sy + isb + + msr vttbr_el2, xzr + ret +ENDPROC(__kvm_tlb_flush_vmid_ipa) + +ENTRY(__kvm_flush_vm_context) + tlbi alle1is + ic ialluis + dsb sy + ret +ENDPROC(__kvm_flush_vm_context) + +__kvm_hyp_panic: + // Guess the context by looking at VTTBR: + // If zero, then we're already a host. + // Otherwise restore a minimal host context before panicing. 
+ mrs x0, vttbr_el2 + cbz x0, 1f + + mrs x0, tpidr_el2 + + deactivate_traps + deactivate_vm + + ldr x2, [x0, #VCPU_HOST_CONTEXT] + kern_hyp_va x2 + + bl __restore_sysregs + +1: adr x0, __hyp_panic_str + adr x1, 2f + ldp x2, x3, [x1] + sub x0, x0, x2 + add x0, x0, x3 + mrs x1, spsr_el2 + mrs x2, elr_el2 + mrs x3, esr_el2 + mrs x4, far_el2 + mrs x5, hpfar_el2 + mrs x6, par_el1 + mrs x7, tpidr_el2 + + mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ + PSR_MODE_EL1h) + msr spsr_el2, lr + ldr lr, =panic + msr elr_el2, lr + eret + + .align 3 +2: .quad HYP_PAGE_OFFSET + .quad PAGE_OFFSET +ENDPROC(__kvm_hyp_panic) + +__hyp_panic_str: + .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" + + .align 2 + +ENTRY(kvm_call_hyp) + hvc #0 + ret +ENDPROC(kvm_call_hyp) + +.macro invalid_vector label, target + .align 2 +\label: + b \target +ENDPROC(\label) +.endm + + /* None of these should ever happen */ + invalid_vector el2t_sync_invalid, __kvm_hyp_panic + invalid_vector el2t_irq_invalid, __kvm_hyp_panic + invalid_vector el2t_fiq_invalid, __kvm_hyp_panic + invalid_vector el2t_error_invalid, __kvm_hyp_panic + invalid_vector el2h_sync_invalid, __kvm_hyp_panic + invalid_vector el2h_irq_invalid, __kvm_hyp_panic + invalid_vector el2h_fiq_invalid, __kvm_hyp_panic + invalid_vector el2h_error_invalid, __kvm_hyp_panic + invalid_vector el1_sync_invalid, __kvm_hyp_panic + invalid_vector el1_irq_invalid, __kvm_hyp_panic + invalid_vector el1_fiq_invalid, __kvm_hyp_panic + invalid_vector el1_error_invalid, __kvm_hyp_panic + +el1_sync: // Guest trapped into EL2 + push x0, x1 + push x2, x3 + + mrs x1, esr_el2 + lsr x2, x1, #ESR_EL2_EC_SHIFT + + cmp x2, #ESR_EL2_EC_HVC64 + b.ne el1_trap + + mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest + cbnz x3, el1_trap // called HVC + + /* Here, we're pretty sure the host called HVC. */ + pop x2, x3 + pop x0, x1 + + push lr, xzr + + /* + * Compute the function address in EL2, and shuffle the parameters. + */ + kern_hyp_va x0 + mov lr, x0 + mov x0, x1 + mov x1, x2 + mov x2, x3 + blr lr + + pop lr, xzr + eret + +el1_trap: + /* + * x1: ESR + * x2: ESR_EC + */ + cmp x2, #ESR_EL2_EC_DABT + mov x0, #ESR_EL2_EC_IABT + ccmp x2, x0, #4, ne + b.ne 1f // Not an abort we care about + + /* This is an abort. Check for permission fault */ + and x2, x1, #ESR_EL2_FSC_TYPE + cmp x2, #FSC_PERM + b.ne 1f // Not a permission fault + + /* + * Check for Stage-1 page table walk, which is guaranteed + * to give a valid HPFAR_EL2. + */ + tbnz x1, #7, 1f // S1PTW is set + + /* + * Permission fault, HPFAR_EL2 is invalid. + * Resolve the IPA the hard way using the guest VA. + * Stage-1 translation already validated the memory access rights. + * As such, we can use the EL1 translation regime, and don't have + * to distinguish between EL0 and EL1 access. + */ + mrs x2, far_el2 + at s1e1r, x2 + isb + + /* Read result */ + mrs x3, par_el1 + tbnz x3, #0, 3f // Bail out if we failed the translation + ubfx x3, x3, #12, #36 // Extract IPA + lsl x3, x3, #4 // and present it like HPFAR + b 2f + +1: mrs x3, hpfar_el2 + mrs x2, far_el2 + +2: mrs x0, tpidr_el2 + str x1, [x0, #VCPU_ESR_EL2] + str x2, [x0, #VCPU_FAR_EL2] + str x3, [x0, #VCPU_HPFAR_EL2] + + mov x1, #ARM_EXCEPTION_TRAP + b __kvm_vcpu_return + + /* + * Translation failed. Just return to the guest and + * let it fault again. Another CPU is probably playing + * behind our back. 
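The el1_sync path above separates guest traps from the host's own HVC calls: when VTTBR_EL2 is zero, x0 is taken as the EL2 function to run (converted to a HYP VA by the handler) and x1-x3 as its arguments, which is the convention the kvm_call_hyp() trampoline relies on. A kernel-side sketch of how the shared KVM/ARM code (not part of this hunk) is expected to drive the EL2 entry points defined here; the wrapper function is hypothetical:

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>

/* Hypothetical caller: flush the Stage-2 TLB entries for one IPA by issuing
 * hvc #0 with x0 = __kvm_tlb_flush_vmid_ipa and x1/x2 = the arguments. */
static void example_flush_stage2_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}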
+ */ +3: pop x2, x3 + pop x0, x1 + + eret + +el1_irq: + push x0, x1 + push x2, x3 + mrs x0, tpidr_el2 + mov x1, #ARM_EXCEPTION_IRQ + b __kvm_vcpu_return + + .ltorg + + .align 11 + +ENTRY(__kvm_hyp_vector) + ventry el2t_sync_invalid // Synchronous EL2t + ventry el2t_irq_invalid // IRQ EL2t + ventry el2t_fiq_invalid // FIQ EL2t + ventry el2t_error_invalid // Error EL2t + + ventry el2h_sync_invalid // Synchronous EL2h + ventry el2h_irq_invalid // IRQ EL2h + ventry el2h_fiq_invalid // FIQ EL2h + ventry el2h_error_invalid // Error EL2h + + ventry el1_sync // Synchronous 64-bit EL1 + ventry el1_irq // IRQ 64-bit EL1 + ventry el1_fiq_invalid // FIQ 64-bit EL1 + ventry el1_error_invalid // Error 64-bit EL1 + + ventry el1_sync // Synchronous 32-bit EL1 + ventry el1_irq // IRQ 32-bit EL1 + ventry el1_fiq_invalid // FIQ 32-bit EL1 + ventry el1_error_invalid // Error 32-bit EL1 +ENDPROC(__kvm_hyp_vector) + +__kvm_hyp_code_end: + .globl __kvm_hyp_code_end + + .popsection diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c new file mode 100644 index 000000000000..81a02a8762b0 --- /dev/null +++ b/arch/arm64/kvm/inject_fault.c @@ -0,0 +1,203 @@ +/* + * Fault injection for both 32 and 64bit guests. + * + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Based on arch/arm/kvm/emulate.c + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_emulate.h> +#include <asm/esr.h> + +#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \ + PSR_I_BIT | PSR_D_BIT) +#define EL1_EXCEPT_SYNC_OFFSET 0x200 + +static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) +{ + unsigned long cpsr; + unsigned long new_spsr_value = *vcpu_cpsr(vcpu); + bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT); + u32 return_offset = (is_thumb) ? 4 : 0; + u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); + + cpsr = mode | COMPAT_PSR_I_BIT; + + if (sctlr & (1 << 30)) + cpsr |= COMPAT_PSR_T_BIT; + if (sctlr & (1 << 25)) + cpsr |= COMPAT_PSR_E_BIT; + + *vcpu_cpsr(vcpu) = cpsr; + + /* Note: These now point to the banked copies */ + *vcpu_spsr(vcpu) = new_spsr_value; + *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; + + /* Branch to exception vector */ + if (sctlr & (1 << 13)) + vect_offset += 0xffff0000; + else /* always have security exceptions */ + vect_offset += vcpu_cp15(vcpu, c12_VBAR); + + *vcpu_pc(vcpu) = vect_offset; +} + +static void inject_undef32(struct kvm_vcpu *vcpu) +{ + prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); +} + +/* + * Modelled after TakeDataAbortException() and TakePrefetchAbortException + * pseudocode. 
+ */ +static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, + unsigned long addr) +{ + u32 vect_offset; + u32 *far, *fsr; + bool is_lpae; + + if (is_pabt) { + vect_offset = 12; + far = &vcpu_cp15(vcpu, c6_IFAR); + fsr = &vcpu_cp15(vcpu, c5_IFSR); + } else { /* !iabt */ + vect_offset = 16; + far = &vcpu_cp15(vcpu, c6_DFAR); + fsr = &vcpu_cp15(vcpu, c5_DFSR); + } + + prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset); + + *far = addr; + + /* Give the guest an IMPLEMENTATION DEFINED exception */ + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); + if (is_lpae) + *fsr = 1 << 9 | 0x34; + else + *fsr = 0x14; +} + +static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) +{ + unsigned long cpsr = *vcpu_cpsr(vcpu); + bool is_aarch32; + u32 esr = 0; + + is_aarch32 = vcpu_mode_is_32bit(vcpu); + + *vcpu_spsr(vcpu) = cpsr; + *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); + + *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; + *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; + + vcpu_sys_reg(vcpu, FAR_EL1) = addr; + + /* + * Build an {i,d}abort, depending on the level and the + * instruction set. Report an external synchronous abort. + */ + if (kvm_vcpu_trap_il_is32bit(vcpu)) + esr |= ESR_EL1_IL; + + /* + * Here, the guest runs in AArch64 mode when in EL1. If we get + * an AArch32 fault, it means we managed to trap an EL0 fault. + */ + if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t) + esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT); + else + esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT); + + if (!is_iabt) + esr |= ESR_EL1_EC_DABT_EL0; + + vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT; +} + +static void inject_undef64(struct kvm_vcpu *vcpu) +{ + unsigned long cpsr = *vcpu_cpsr(vcpu); + u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT); + + *vcpu_spsr(vcpu) = cpsr; + *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); + + *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; + *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; + + /* + * Build an unknown exception, depending on the instruction + * set. + */ + if (kvm_vcpu_trap_il_is32bit(vcpu)) + esr |= ESR_EL1_IL; + + vcpu_sys_reg(vcpu, ESR_EL1) = esr; +} + +/** + * kvm_inject_dabt - inject a data abort into the guest + * @vcpu: The VCPU to receive the undefined exception + * @addr: The address to report in the DFAR + * + * It is assumed that this code is called from the VCPU thread and that the + * VCPU therefore is not currently executing guest code. + */ +void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) +{ + if (!(vcpu->arch.hcr_el2 & HCR_RW)) + inject_abt32(vcpu, false, addr); + + inject_abt64(vcpu, false, addr); +} + +/** + * kvm_inject_pabt - inject a prefetch abort into the guest + * @vcpu: The VCPU to receive the undefined exception + * @addr: The address to report in the DFAR + * + * It is assumed that this code is called from the VCPU thread and that the + * VCPU therefore is not currently executing guest code. + */ +void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) +{ + if (!(vcpu->arch.hcr_el2 & HCR_RW)) + inject_abt32(vcpu, true, addr); + + inject_abt64(vcpu, true, addr); +} + +/** + * kvm_inject_undefined - inject an undefined instruction into the guest + * + * It is assumed that this code is called from the VCPU thread and that the + * VCPU therefore is not currently executing guest code. 
+ */ +void kvm_inject_undefined(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.hcr_el2 & HCR_RW)) + inject_undef32(vcpu); + + inject_undef64(vcpu); +} diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c new file mode 100644 index 000000000000..bbc6ae32e4af --- /dev/null +++ b/arch/arm64/kvm/regmap.c @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/kvm/emulate.c: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/mm.h> +#include <linux/kvm_host.h> +#include <asm/kvm_emulate.h> +#include <asm/ptrace.h> + +#define VCPU_NR_MODES 6 +#define REG_OFFSET(_reg) \ + (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long)) + +#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R)) + +static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = { + /* USR Registers */ + { + USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), + USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), + USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), + USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), + USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14), + REG_OFFSET(pc) + }, + + /* FIQ Registers */ + { + USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), + USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), + USR_REG_OFFSET(6), USR_REG_OFFSET(7), + REG_OFFSET(compat_r8_fiq), /* r8 */ + REG_OFFSET(compat_r9_fiq), /* r9 */ + REG_OFFSET(compat_r10_fiq), /* r10 */ + REG_OFFSET(compat_r11_fiq), /* r11 */ + REG_OFFSET(compat_r12_fiq), /* r12 */ + REG_OFFSET(compat_sp_fiq), /* r13 */ + REG_OFFSET(compat_lr_fiq), /* r14 */ + REG_OFFSET(pc) + }, + + /* IRQ Registers */ + { + USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), + USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), + USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), + USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), + USR_REG_OFFSET(12), + REG_OFFSET(compat_sp_irq), /* r13 */ + REG_OFFSET(compat_lr_irq), /* r14 */ + REG_OFFSET(pc) + }, + + /* SVC Registers */ + { + USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), + USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), + USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), + USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), + USR_REG_OFFSET(12), + REG_OFFSET(compat_sp_svc), /* r13 */ + REG_OFFSET(compat_lr_svc), /* r14 */ + REG_OFFSET(pc) + }, + + /* ABT Registers */ + { + USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), + USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), + USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), + USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), + USR_REG_OFFSET(12), + REG_OFFSET(compat_sp_abt), /* r13 */ + REG_OFFSET(compat_lr_abt), /* r14 */ + REG_OFFSET(pc) + }, + + /* UND 
Registers */ + { + USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), + USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), + USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), + USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), + USR_REG_OFFSET(12), + REG_OFFSET(compat_sp_und), /* r13 */ + REG_OFFSET(compat_lr_und), /* r14 */ + REG_OFFSET(pc) + }, +}; + +/* + * Return a pointer to the register number valid in the current mode of + * the virtual CPU. + */ +unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) +{ + unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; + unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; + + switch (mode) { + case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC: + mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */ + break; + + case COMPAT_PSR_MODE_ABT: + mode = 4; + break; + + case COMPAT_PSR_MODE_UND: + mode = 5; + break; + + case COMPAT_PSR_MODE_SYS: + mode = 0; /* SYS maps to USR */ + break; + + default: + BUG(); + } + + return reg_array + vcpu_reg_offsets[mode][reg_num]; +} + +/* + * Return the SPSR for the current mode of the virtual CPU. + */ +unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu) +{ + unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; + switch (mode) { + case COMPAT_PSR_MODE_SVC: + mode = KVM_SPSR_SVC; + break; + case COMPAT_PSR_MODE_ABT: + mode = KVM_SPSR_ABT; + break; + case COMPAT_PSR_MODE_UND: + mode = KVM_SPSR_UND; + break; + case COMPAT_PSR_MODE_IRQ: + mode = KVM_SPSR_IRQ; + break; + case COMPAT_PSR_MODE_FIQ: + mode = KVM_SPSR_FIQ; + break; + default: + BUG(); + } + + return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode]; +} diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c new file mode 100644 index 000000000000..70a7816535cd --- /dev/null +++ b/arch/arm64/kvm/reset.c @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/kvm/reset.c + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/errno.h> +#include <linux/kvm_host.h> +#include <linux/kvm.h> + +#include <kvm/arm_arch_timer.h> + +#include <asm/cputype.h> +#include <asm/ptrace.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_coproc.h> + +/* + * ARMv8 Reset Values + */ +static const struct kvm_regs default_regs_reset = { + .regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | + PSR_F_BIT | PSR_D_BIT), +}; + +static const struct kvm_regs default_regs_reset32 = { + .regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT | + COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT), +}; + +static const struct kvm_irq_level default_vtimer_irq = { + .irq = 27, + .level = 1, +}; + +static bool cpu_has_32bit_el1(void) +{ + u64 pfr0; + + pfr0 = read_cpuid(ID_AA64PFR0_EL1); + return !!(pfr0 & 0x20); +} + +int kvm_arch_dev_ioctl_check_extension(long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_ARM_EL1_32BIT: + r = cpu_has_32bit_el1(); + break; + default: + r = 0; + } + + return r; +} + +/** + * kvm_reset_vcpu - sets core registers and sys_regs to reset value + * @vcpu: The VCPU pointer + * + * This function finds the right table above and sets the registers on + * the virtual CPU struct to their architectually defined reset + * values. + */ +int kvm_reset_vcpu(struct kvm_vcpu *vcpu) +{ + const struct kvm_irq_level *cpu_vtimer_irq; + const struct kvm_regs *cpu_reset; + + switch (vcpu->arch.target) { + default: + if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { + if (!cpu_has_32bit_el1()) + return -EINVAL; + cpu_reset = &default_regs_reset32; + vcpu->arch.hcr_el2 &= ~HCR_RW; + } else { + cpu_reset = &default_regs_reset; + } + + cpu_vtimer_irq = &default_vtimer_irq; + break; + } + + /* Reset core registers */ + memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset)); + + /* Reset system registers */ + kvm_reset_sys_regs(vcpu); + + /* Reset timer */ + kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); + + return 0; +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c new file mode 100644 index 000000000000..94923609753b --- /dev/null +++ b/arch/arm64/kvm/sys_regs.c @@ -0,0 +1,1050 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/kvm/coproc.c: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Authors: Rusty Russell <rusty@rustcorp.com.au> + * Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/mm.h> +#include <linux/kvm_host.h> +#include <linux/uaccess.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_host.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_coproc.h> +#include <asm/cacheflush.h> +#include <asm/cputype.h> +#include <trace/events/kvm.h> + +#include "sys_regs.h" + +/* + * All of this file is extremly similar to the ARM coproc.c, but the + * types are different. My gut feeling is that it should be pretty + * easy to merge, but that would be an ABI breakage -- again. 
VFP + * would also need to be abstracted. + * + * For AArch32, we only take care of what is being trapped. Anything + * that has to do with init and userspace access has to go via the + * 64bit interface. + */ + +/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ +static u32 cache_levels; + +/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ +#define CSSELR_MAX 12 + +/* Which cache CCSIDR represents depends on CSSELR value. */ +static u32 get_ccsidr(u32 csselr) +{ + u32 ccsidr; + + /* Make sure noone else changes CSSELR during this! */ + local_irq_disable(); + /* Put value into CSSELR */ + asm volatile("msr csselr_el1, %x0" : : "r" (csselr)); + isb(); + /* Read result out of CCSIDR */ + asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr)); + local_irq_enable(); + + return ccsidr; +} + +static void do_dc_cisw(u32 val) +{ + asm volatile("dc cisw, %x0" : : "r" (val)); + dsb(); +} + +static void do_dc_csw(u32 val) +{ + asm volatile("dc csw, %x0" : : "r" (val)); + dsb(); +} + +/* See note at ARM ARM B1.14.4 */ +static bool access_dcsw(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + unsigned long val; + int cpu; + + if (!p->is_write) + return read_from_write_only(vcpu, p); + + cpu = get_cpu(); + + cpumask_setall(&vcpu->arch.require_dcache_flush); + cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); + + /* If we were already preempted, take the long way around */ + if (cpu != vcpu->arch.last_pcpu) { + flush_cache_all(); + goto done; + } + + val = *vcpu_reg(vcpu, p->Rt); + + switch (p->CRm) { + case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ + case 14: /* DCCISW */ + do_dc_cisw(val); + break; + + case 10: /* DCCSW */ + do_dc_csw(val); + break; + } + +done: + put_cpu(); + + return true; +} + +/* + * We could trap ID_DFR0 and tell the guest we don't support performance + * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was + * NAKed, so it will read the PMCR anyway. + * + * Therefore we tell the guest we have 0 counters. Unfortunately, we + * must always support PMCCNTR (the cycle counter): we just RAZ/WI for + * all PM registers, which doesn't crash the guest kernel at least. + */ +static bool pm_fake(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) + return ignore_write(vcpu, p); + else + return read_zero(vcpu, p); +} + +static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) +{ + u64 amair; + + asm volatile("mrs %0, amair_el1\n" : "=r" (amair)); + vcpu_sys_reg(vcpu, AMAIR_EL1) = amair; +} + +static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) +{ + /* + * Simply map the vcpu_id into the Aff0 field of the MPIDR. + */ + vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff); +} + +/* + * Architected system registers. 
+ * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 + */ +static const struct sys_reg_desc sys_reg_descs[] = { + /* DC ISW */ + { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010), + access_dcsw }, + /* DC CSW */ + { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010), + access_dcsw }, + /* DC CISW */ + { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), + access_dcsw }, + + /* TEECR32_EL1 */ + { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), + NULL, reset_val, TEECR32_EL1, 0 }, + /* TEEHBR32_EL1 */ + { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), + NULL, reset_val, TEEHBR32_EL1, 0 }, + /* DBGVCR32_EL2 */ + { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), + NULL, reset_val, DBGVCR32_EL2, 0 }, + + /* MPIDR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101), + NULL, reset_mpidr, MPIDR_EL1 }, + /* SCTLR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), + NULL, reset_val, SCTLR_EL1, 0x00C50078 }, + /* CPACR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), + NULL, reset_val, CPACR_EL1, 0 }, + /* TTBR0_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), + NULL, reset_unknown, TTBR0_EL1 }, + /* TTBR1_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), + NULL, reset_unknown, TTBR1_EL1 }, + /* TCR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), + NULL, reset_val, TCR_EL1, 0 }, + + /* AFSR0_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), + NULL, reset_unknown, AFSR0_EL1 }, + /* AFSR1_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), + NULL, reset_unknown, AFSR1_EL1 }, + /* ESR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), + NULL, reset_unknown, ESR_EL1 }, + /* FAR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), + NULL, reset_unknown, FAR_EL1 }, + + /* PMINTENSET_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), + pm_fake }, + /* PMINTENCLR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), + pm_fake }, + + /* MAIR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), + NULL, reset_unknown, MAIR_EL1 }, + /* AMAIR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), + NULL, reset_amair_el1, AMAIR_EL1 }, + + /* VBAR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), + NULL, reset_val, VBAR_EL1, 0 }, + /* CONTEXTIDR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), + NULL, reset_val, CONTEXTIDR_EL1, 0 }, + /* TPIDR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), + NULL, reset_unknown, TPIDR_EL1 }, + + /* CNTKCTL_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000), + NULL, reset_val, CNTKCTL_EL1, 0}, + + /* CSSELR_EL1 */ + { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), + NULL, reset_unknown, CSSELR_EL1 }, + + /* PMCR_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), + pm_fake }, + /* PMCNTENSET_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), + pm_fake }, + /* PMCNTENCLR_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), + pm_fake }, + /* PMOVSCLR_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), + pm_fake }, + /* PMSWINC_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), + pm_fake }, + /* PMSELR_EL0 */ + { 
Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), + pm_fake }, + /* PMCEID0_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), + pm_fake }, + /* PMCEID1_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), + pm_fake }, + /* PMCCNTR_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), + pm_fake }, + /* PMXEVTYPER_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), + pm_fake }, + /* PMXEVCNTR_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), + pm_fake }, + /* PMUSERENR_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), + pm_fake }, + /* PMOVSSET_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), + pm_fake }, + + /* TPIDR_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), + NULL, reset_unknown, TPIDR_EL0 }, + /* TPIDRRO_EL0 */ + { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), + NULL, reset_unknown, TPIDRRO_EL0 }, + + /* DACR32_EL2 */ + { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), + NULL, reset_unknown, DACR32_EL2 }, + /* IFSR32_EL2 */ + { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001), + NULL, reset_unknown, IFSR32_EL2 }, + /* FPEXC32_EL2 */ + { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000), + NULL, reset_val, FPEXC32_EL2, 0x70 }, +}; + +/* Trapped cp15 registers */ +static const struct sys_reg_desc cp15_regs[] = { + /* + * DC{C,I,CI}SW operations: + */ + { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, + { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, + { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, + { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake }, + { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake }, + { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake }, + { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake }, + { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake }, + { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake }, + { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake }, + { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake }, + { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake }, + { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake }, + { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake }, + { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake }, + { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake }, +}; + +/* Target specific emulation tables */ +static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; + +void kvm_register_target_sys_reg_table(unsigned int target, + struct kvm_sys_reg_target_table *table) +{ + target_tables[target] = table; +} + +/* Get specific register table for this target. 
*/ +static const struct sys_reg_desc *get_target_table(unsigned target, + bool mode_is_64, + size_t *num) +{ + struct kvm_sys_reg_target_table *table; + + table = target_tables[target]; + if (mode_is_64) { + *num = table->table64.num; + return table->table64.table; + } else { + *num = table->table32.num; + return table->table32.table; + } +} + +static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, + const struct sys_reg_desc table[], + unsigned int num) +{ + unsigned int i; + + for (i = 0; i < num; i++) { + const struct sys_reg_desc *r = &table[i]; + + if (params->Op0 != r->Op0) + continue; + if (params->Op1 != r->Op1) + continue; + if (params->CRn != r->CRn) + continue; + if (params->CRm != r->CRm) + continue; + if (params->Op2 != r->Op2) + continue; + + return r; + } + return NULL; +} + +int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + kvm_inject_undefined(vcpu); + return 1; +} + +int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + kvm_inject_undefined(vcpu); + return 1; +} + +static void emulate_cp15(struct kvm_vcpu *vcpu, + const struct sys_reg_params *params) +{ + size_t num; + const struct sys_reg_desc *table, *r; + + table = get_target_table(vcpu->arch.target, false, &num); + + /* Search target-specific then generic table. */ + r = find_reg(params, table, num); + if (!r) + r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); + + if (likely(r)) { + /* + * Not having an accessor means that we have + * configured a trap that we don't know how to + * handle. This certainly qualifies as a gross bug + * that should be fixed right away. + */ + BUG_ON(!r->access); + + if (likely(r->access(vcpu, params, r))) { + /* Skip instruction, since it was emulated */ + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + return; + } + /* If access function fails, it should complain. */ + } + + kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu)); + print_sys_reg_instr(params); + kvm_inject_undefined(vcpu); +} + +/** + * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access + * @vcpu: The VCPU pointer + * @run: The kvm_run struct + */ +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + struct sys_reg_params params; + u32 hsr = kvm_vcpu_get_hsr(vcpu); + int Rt2 = (hsr >> 10) & 0xf; + + params.CRm = (hsr >> 1) & 0xf; + params.Rt = (hsr >> 5) & 0xf; + params.is_write = ((hsr & 1) == 0); + + params.Op0 = 0; + params.Op1 = (hsr >> 16) & 0xf; + params.Op2 = 0; + params.CRn = 0; + + /* + * Massive hack here. Store Rt2 in the top 32bits so we only + * have one register to deal with. As we use the same trap + * backends between AArch32 and AArch64, we get away with it. 
+ */ + if (params.is_write) { + u64 val = *vcpu_reg(vcpu, params.Rt); + val &= 0xffffffff; + val |= *vcpu_reg(vcpu, Rt2) << 32; + *vcpu_reg(vcpu, params.Rt) = val; + } + + emulate_cp15(vcpu, ¶ms); + + /* Do the opposite hack for the read side */ + if (!params.is_write) { + u64 val = *vcpu_reg(vcpu, params.Rt); + val >>= 32; + *vcpu_reg(vcpu, Rt2) = val; + } + + return 1; +} + +/** + * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access + * @vcpu: The VCPU pointer + * @run: The kvm_run struct + */ +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + struct sys_reg_params params; + u32 hsr = kvm_vcpu_get_hsr(vcpu); + + params.CRm = (hsr >> 1) & 0xf; + params.Rt = (hsr >> 5) & 0xf; + params.is_write = ((hsr & 1) == 0); + params.CRn = (hsr >> 10) & 0xf; + params.Op0 = 0; + params.Op1 = (hsr >> 14) & 0x7; + params.Op2 = (hsr >> 17) & 0x7; + + emulate_cp15(vcpu, ¶ms); + return 1; +} + +static int emulate_sys_reg(struct kvm_vcpu *vcpu, + const struct sys_reg_params *params) +{ + size_t num; + const struct sys_reg_desc *table, *r; + + table = get_target_table(vcpu->arch.target, true, &num); + + /* Search target-specific then generic table. */ + r = find_reg(params, table, num); + if (!r) + r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); + + if (likely(r)) { + /* + * Not having an accessor means that we have + * configured a trap that we don't know how to + * handle. This certainly qualifies as a gross bug + * that should be fixed right away. + */ + BUG_ON(!r->access); + + if (likely(r->access(vcpu, params, r))) { + /* Skip instruction, since it was emulated */ + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + return 1; + } + /* If access function fails, it should complain. */ + } else { + kvm_err("Unsupported guest sys_reg access at: %lx\n", + *vcpu_pc(vcpu)); + print_sys_reg_instr(params); + } + kvm_inject_undefined(vcpu); + return 1; +} + +static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *table, size_t num) +{ + unsigned long i; + + for (i = 0; i < num; i++) + if (table[i].reset) + table[i].reset(vcpu, &table[i]); +} + +/** + * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access + * @vcpu: The VCPU pointer + * @run: The kvm_run struct + */ +int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + struct sys_reg_params params; + unsigned long esr = kvm_vcpu_get_hsr(vcpu); + + params.Op0 = (esr >> 20) & 3; + params.Op1 = (esr >> 14) & 0x7; + params.CRn = (esr >> 10) & 0xf; + params.CRm = (esr >> 1) & 0xf; + params.Op2 = (esr >> 17) & 0x7; + params.Rt = (esr >> 5) & 0x1f; + params.is_write = !(esr & 1); + + return emulate_sys_reg(vcpu, ¶ms); +} + +/****************************************************************************** + * Userspace API + *****************************************************************************/ + +static bool index_to_params(u64 id, struct sys_reg_params *params) +{ + switch (id & KVM_REG_SIZE_MASK) { + case KVM_REG_SIZE_U64: + /* Any unused index bits means it's not valid. 
*/ + if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK + | KVM_REG_ARM_COPROC_MASK + | KVM_REG_ARM64_SYSREG_OP0_MASK + | KVM_REG_ARM64_SYSREG_OP1_MASK + | KVM_REG_ARM64_SYSREG_CRN_MASK + | KVM_REG_ARM64_SYSREG_CRM_MASK + | KVM_REG_ARM64_SYSREG_OP2_MASK)) + return false; + params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) + >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); + params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) + >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); + params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) + >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); + params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) + >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); + params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) + >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); + return true; + default: + return false; + } +} + +/* Decode an index value, and find the sys_reg_desc entry. */ +static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, + u64 id) +{ + size_t num; + const struct sys_reg_desc *table, *r; + struct sys_reg_params params; + + /* We only do sys_reg for now. */ + if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) + return NULL; + + if (!index_to_params(id, ¶ms)) + return NULL; + + table = get_target_table(vcpu->arch.target, true, &num); + r = find_reg(¶ms, table, num); + if (!r) + r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); + + /* Not saved in the sys_reg array? */ + if (r && !r->reg) + r = NULL; + + return r; +} + +/* + * These are the invariant sys_reg registers: we let the guest see the + * host versions of these, so they're part of the guest state. + * + * A future CPU may provide a mechanism to present different values to + * the guest, or a future kvm may trap them. + */ + +#define FUNCTION_INVARIANT(reg) \ + static void get_##reg(struct kvm_vcpu *v, \ + const struct sys_reg_desc *r) \ + { \ + u64 val; \ + \ + asm volatile("mrs %0, " __stringify(reg) "\n" \ + : "=r" (val)); \ + ((struct sys_reg_desc *)r)->val = val; \ + } + +FUNCTION_INVARIANT(midr_el1) +FUNCTION_INVARIANT(ctr_el0) +FUNCTION_INVARIANT(revidr_el1) +FUNCTION_INVARIANT(id_pfr0_el1) +FUNCTION_INVARIANT(id_pfr1_el1) +FUNCTION_INVARIANT(id_dfr0_el1) +FUNCTION_INVARIANT(id_afr0_el1) +FUNCTION_INVARIANT(id_mmfr0_el1) +FUNCTION_INVARIANT(id_mmfr1_el1) +FUNCTION_INVARIANT(id_mmfr2_el1) +FUNCTION_INVARIANT(id_mmfr3_el1) +FUNCTION_INVARIANT(id_isar0_el1) +FUNCTION_INVARIANT(id_isar1_el1) +FUNCTION_INVARIANT(id_isar2_el1) +FUNCTION_INVARIANT(id_isar3_el1) +FUNCTION_INVARIANT(id_isar4_el1) +FUNCTION_INVARIANT(id_isar5_el1) +FUNCTION_INVARIANT(clidr_el1) +FUNCTION_INVARIANT(aidr_el1) + +/* ->val is filled in by kvm_sys_reg_table_init() */ +static struct sys_reg_desc invariant_sys_regs[] = { + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000), + NULL, get_midr_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110), + NULL, get_revidr_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000), + NULL, get_id_pfr0_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001), + NULL, get_id_pfr1_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010), + NULL, get_id_dfr0_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011), + NULL, get_id_afr0_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100), + NULL, get_id_mmfr0_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101), + NULL, get_id_mmfr1_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110), + NULL, get_id_mmfr2_el1 }, + { Op0(0b11), 
Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111), + NULL, get_id_mmfr3_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), + NULL, get_id_isar0_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001), + NULL, get_id_isar1_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), + NULL, get_id_isar2_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011), + NULL, get_id_isar3_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100), + NULL, get_id_isar4_el1 }, + { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101), + NULL, get_id_isar5_el1 }, + { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001), + NULL, get_clidr_el1 }, + { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111), + NULL, get_aidr_el1 }, + { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001), + NULL, get_ctr_el0 }, +}; + +static int reg_from_user(void *val, const void __user *uaddr, u64 id) +{ + /* This Just Works because we are little endian. */ + if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) + return -EFAULT; + return 0; +} + +static int reg_to_user(void __user *uaddr, const void *val, u64 id) +{ + /* This Just Works because we are little endian. */ + if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) + return -EFAULT; + return 0; +} + +static int get_invariant_sys_reg(u64 id, void __user *uaddr) +{ + struct sys_reg_params params; + const struct sys_reg_desc *r; + + if (!index_to_params(id, &params)) + return -ENOENT; + + r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); + if (!r) + return -ENOENT; + + return reg_to_user(uaddr, &r->val, id); +} + +static int set_invariant_sys_reg(u64 id, void __user *uaddr) +{ + struct sys_reg_params params; + const struct sys_reg_desc *r; + int err; + u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ + + if (!index_to_params(id, &params)) + return -ENOENT; + r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); + if (!r) + return -ENOENT; + + err = reg_from_user(&val, uaddr, id); + if (err) + return err; + + /* This is what we mean by invariant: you can't change it. */ + if (r->val != val) + return -EINVAL; + + return 0; +} + +static bool is_valid_cache(u32 val) +{ + u32 level, ctype; + + if (val >= CSSELR_MAX) + return -ENOENT; + + /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ + level = (val >> 1); + ctype = (cache_levels >> (level * 3)) & 7; + + switch (ctype) { + case 0: /* No cache */ + return false; + case 1: /* Instruction cache only */ + return (val & 1); + case 2: /* Data cache only */ + case 4: /* Unified cache */ + return !(val & 1); + case 3: /* Separate instruction and data caches */ + return true; + default: /* Reserved: we can't know instruction or data. */ + return false; + } +} + +static int demux_c15_get(u64 id, void __user *uaddr) +{ + u32 val; + u32 __user *uval = uaddr; + + /* Fail if we have unknown bits set. 
*/ + if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK + | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) + return -ENOENT; + + switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { + case KVM_REG_ARM_DEMUX_ID_CCSIDR: + if (KVM_REG_SIZE(id) != 4) + return -ENOENT; + val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) + >> KVM_REG_ARM_DEMUX_VAL_SHIFT; + if (!is_valid_cache(val)) + return -ENOENT; + + return put_user(get_ccsidr(val), uval); + default: + return -ENOENT; + } +} + +static int demux_c15_set(u64 id, void __user *uaddr) +{ + u32 val, newval; + u32 __user *uval = uaddr; + + /* Fail if we have unknown bits set. */ + if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK + | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) + return -ENOENT; + + switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { + case KVM_REG_ARM_DEMUX_ID_CCSIDR: + if (KVM_REG_SIZE(id) != 4) + return -ENOENT; + val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) + >> KVM_REG_ARM_DEMUX_VAL_SHIFT; + if (!is_valid_cache(val)) + return -ENOENT; + + if (get_user(newval, uval)) + return -EFAULT; + + /* This is also invariant: you can't change it. */ + if (newval != get_ccsidr(val)) + return -EINVAL; + return 0; + default: + return -ENOENT; + } +} + +int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + const struct sys_reg_desc *r; + void __user *uaddr = (void __user *)(unsigned long)reg->addr; + + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) + return demux_c15_get(reg->id, uaddr); + + if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) + return -ENOENT; + + r = index_to_sys_reg_desc(vcpu, reg->id); + if (!r) + return get_invariant_sys_reg(reg->id, uaddr); + + return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); +} + +int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + const struct sys_reg_desc *r; + void __user *uaddr = (void __user *)(unsigned long)reg->addr; + + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) + return demux_c15_set(reg->id, uaddr); + + if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) + return -ENOENT; + + r = index_to_sys_reg_desc(vcpu, reg->id); + if (!r) + return set_invariant_sys_reg(reg->id, uaddr); + + return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); +} + +static unsigned int num_demux_regs(void) +{ + unsigned int i, count = 0; + + for (i = 0; i < CSSELR_MAX; i++) + if (is_valid_cache(i)) + count++; + + return count; +} + +static int write_demux_regids(u64 __user *uindices) +{ + u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; + unsigned int i; + + val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; + for (i = 0; i < CSSELR_MAX; i++) { + if (!is_valid_cache(i)) + continue; + if (put_user(val | i, uindices)) + return -EFAULT; + uindices++; + } + return 0; +} + +static u64 sys_reg_to_index(const struct sys_reg_desc *reg) +{ + return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | + KVM_REG_ARM64_SYSREG | + (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | + (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | + (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | + (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | + (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); +} + +static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) +{ + if (!*uind) + return true; + + if (put_user(sys_reg_to_index(reg), *uind)) + return false; + + (*uind)++; + return true; +} + +/* Assumed ordered tables, see kvm_sys_reg_table_init. 
*/ +static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) +{ + const struct sys_reg_desc *i1, *i2, *end1, *end2; + unsigned int total = 0; + size_t num; + + /* We check for duplicates here, to allow arch-specific overrides. */ + i1 = get_target_table(vcpu->arch.target, true, &num); + end1 = i1 + num; + i2 = sys_reg_descs; + end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); + + BUG_ON(i1 == end1 || i2 == end2); + + /* Walk carefully, as both tables may refer to the same register. */ + while (i1 || i2) { + int cmp = cmp_sys_reg(i1, i2); + /* target-specific overrides generic entry. */ + if (cmp <= 0) { + /* Ignore registers we trap but don't save. */ + if (i1->reg) { + if (!copy_reg_to_user(i1, &uind)) + return -EFAULT; + total++; + } + } else { + /* Ignore registers we trap but don't save. */ + if (i2->reg) { + if (!copy_reg_to_user(i2, &uind)) + return -EFAULT; + total++; + } + } + + if (cmp <= 0 && ++i1 == end1) + i1 = NULL; + if (cmp >= 0 && ++i2 == end2) + i2 = NULL; + } + return total; +} + +unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) +{ + return ARRAY_SIZE(invariant_sys_regs) + + num_demux_regs() + + walk_sys_regs(vcpu, (u64 __user *)NULL); +} + +int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + unsigned int i; + int err; + + /* Then give them all the invariant registers' indices. */ + for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { + if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) + return -EFAULT; + uindices++; + } + + err = walk_sys_regs(vcpu, uindices); + if (err < 0) + return err; + uindices += err; + + return write_demux_regids(uindices); +} + +void kvm_sys_reg_table_init(void) +{ + unsigned int i; + struct sys_reg_desc clidr; + + /* Make sure tables are unique and in order. */ + for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++) + BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0); + + /* We abuse the reset function to overwrite the table itself. */ + for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) + invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]); + + /* + * CLIDR format is awkward, so clean it up. See ARM B4.1.20: + * + * If software reads the Cache Type fields from Ctype1 + * upwards, once it has seen a value of 0b000, no caches + * exist at further-out levels of the hierarchy. So, for + * example, if Ctype3 is the first Cache Type field with a + * value of 0b000, the values of Ctype4 to Ctype7 must be + * ignored. + */ + get_clidr_el1(NULL, &clidr); /* Ugly... */ + cache_levels = clidr.val; + for (i = 0; i < 7; i++) + if (((cache_levels >> (i*3)) & 7) == 0) + break; + /* Clear all higher bits. */ + cache_levels &= (1 << (i*3))-1; +} + +/** + * kvm_reset_sys_regs - sets system registers to reset value + * @vcpu: The VCPU pointer + * + * This function finds the right table above and sets the registers on the + * virtual CPU struct to their architecturally defined reset values. + */ +void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) +{ + size_t num; + const struct sys_reg_desc *table; + + /* Catch someone adding a register without putting in reset entry. */ + memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); + + /* Generic chip reset first (so target could override). 
*/ + reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); + + table = get_target_table(vcpu->arch.target, true, &num); + reset_sys_reg_descs(vcpu, table, num); + + for (num = 1; num < NR_SYS_REGS; num++) + if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242) + panic("Didn't reset vcpu_sys_reg(%zi)", num); +} diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h new file mode 100644 index 000000000000..d50d3722998e --- /dev/null +++ b/arch/arm64/kvm/sys_regs.h @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Derived from arch/arm/kvm/coproc.h + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Authors: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__ +#define __ARM64_KVM_SYS_REGS_LOCAL_H__ + +struct sys_reg_params { + u8 Op0; + u8 Op1; + u8 CRn; + u8 CRm; + u8 Op2; + u8 Rt; + bool is_write; +}; + +struct sys_reg_desc { + /* MRS/MSR instruction which accesses it. */ + u8 Op0; + u8 Op1; + u8 CRn; + u8 CRm; + u8 Op2; + + /* Trapped access from guest, if non-NULL. */ + bool (*access)(struct kvm_vcpu *, + const struct sys_reg_params *, + const struct sys_reg_desc *); + + /* Initialization for vcpu. */ + void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *); + + /* Index into sys_reg[], or 0 if we don't need to save it. */ + int reg; + + /* Value (usually reset value) */ + u64 val; +}; + +static inline void print_sys_reg_instr(const struct sys_reg_params *p) +{ + /* Look, we even formatted it for you to paste into the table! */ + kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n", + p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? 
"write" : "read"); +} + +static inline bool ignore_write(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p) +{ + return true; +} + +static inline bool read_zero(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p) +{ + *vcpu_reg(vcpu, p->Rt) = 0; + return true; +} + +static inline bool write_to_read_only(struct kvm_vcpu *vcpu, + const struct sys_reg_params *params) +{ + kvm_debug("sys_reg write to read-only register at: %lx\n", + *vcpu_pc(vcpu)); + print_sys_reg_instr(params); + return false; +} + +static inline bool read_from_write_only(struct kvm_vcpu *vcpu, + const struct sys_reg_params *params) +{ + kvm_debug("sys_reg read to write-only register at: %lx\n", + *vcpu_pc(vcpu)); + print_sys_reg_instr(params); + return false; +} + +/* Reset functions */ +static inline void reset_unknown(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *r) +{ + BUG_ON(!r->reg); + BUG_ON(r->reg >= NR_SYS_REGS); + vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL; +} + +static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) +{ + BUG_ON(!r->reg); + BUG_ON(r->reg >= NR_SYS_REGS); + vcpu_sys_reg(vcpu, r->reg) = r->val; +} + +static inline int cmp_sys_reg(const struct sys_reg_desc *i1, + const struct sys_reg_desc *i2) +{ + BUG_ON(i1 == i2); + if (!i1) + return 1; + else if (!i2) + return -1; + if (i1->Op0 != i2->Op0) + return i1->Op0 - i2->Op0; + if (i1->Op1 != i2->Op1) + return i1->Op1 - i2->Op1; + if (i1->CRn != i2->CRn) + return i1->CRn - i2->CRn; + if (i1->CRm != i2->CRm) + return i1->CRm - i2->CRm; + return i1->Op2 - i2->Op2; +} + + +#define Op0(_x) .Op0 = _x +#define Op1(_x) .Op1 = _x +#define CRn(_x) .CRn = _x +#define CRm(_x) .CRm = _x +#define Op2(_x) .Op2 = _x + +#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */ diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c new file mode 100644 index 000000000000..4268ab9356b1 --- /dev/null +++ b/arch/arm64/kvm/sys_regs_generic_v8.c @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2012,2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Based on arch/arm/kvm/coproc_a15.c: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Authors: Rusty Russell <rusty@rustcorp.au> + * Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/kvm_host.h> +#include <asm/cputype.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_host.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_coproc.h> +#include <linux/init.h> + +#include "sys_regs.h" + +static bool access_actlr(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) + return ignore_write(vcpu, p); + + *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); + return true; +} + +static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) +{ + u64 actlr; + + asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr)); + vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr; +} + +/* + * Implementation specific sys-reg registers. + * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 + */ +static const struct sys_reg_desc genericv8_sys_regs[] = { + /* ACTLR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001), + access_actlr, reset_actlr, ACTLR_EL1 }, +}; + +static const struct sys_reg_desc genericv8_cp15_regs[] = { + /* ACTLR */ + { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001), + access_actlr }, +}; + +static struct kvm_sys_reg_target_table genericv8_target_table = { + .table64 = { + .table = genericv8_sys_regs, + .num = ARRAY_SIZE(genericv8_sys_regs), + }, + .table32 = { + .table = genericv8_cp15_regs, + .num = ARRAY_SIZE(genericv8_cp15_regs), + }, +}; + +static int __init sys_reg_genericv8_init(void) +{ + unsigned int i; + + for (i = 1; i < ARRAY_SIZE(genericv8_sys_regs); i++) + BUG_ON(cmp_sys_reg(&genericv8_sys_regs[i-1], + &genericv8_sys_regs[i]) >= 0); + + kvm_register_target_sys_reg_table(KVM_ARM_TARGET_AEM_V8, + &genericv8_target_table); + kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8, + &genericv8_target_table); + kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57, + &genericv8_target_table); + return 0; +} +late_initcall(sys_reg_genericv8_init); diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 3140a2abcdc2..b51d36401d83 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -2,3 +2,4 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ cache.o copypage.o flush.o \ ioremap.o mmap.o pgd.o mmu.o \ context.o tlb.o proc.o +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 1426468b77f3..0ecac8980aae 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -365,17 +365,6 @@ static int __kprobes do_translation_fault(unsigned long addr, } /* - * Some section permission faults need to be handled gracefully. They can - * happen due to a __{get,put}_user during an oops. - */ -static int do_sect_fault(unsigned long addr, unsigned int esr, - struct pt_regs *regs) -{ - do_bad_area(addr, esr, regs); - return 0; -} - -/* * This abort handler always returns "fault". 
*/ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) @@ -398,12 +387,12 @@ static struct fault_info { { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, { do_bad, SIGBUS, 0, "reserved access flag fault" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, { do_bad, SIGBUS, 0, "reserved permission fault" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, - { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, { do_bad, SIGBUS, 0, "synchronous external abort" }, { do_bad, SIGBUS, 0, "asynchronous external abort" }, diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 88611c3a421a..e4193e3adc7f 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -70,23 +70,16 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, #endif } -void __flush_dcache_page(struct page *page) -{ - __flush_dcache_area(page_address(page), PAGE_SIZE); -} - void __sync_icache_dcache(pte_t pte, unsigned long addr) { - unsigned long pfn; - struct page *page; + struct page *page = pte_page(pte); - pfn = pte_pfn(pte); - if (!pfn_valid(pfn)) + /* no flushing needed for anonymous pages */ + if (!page_mapping(page)) return; - page = pfn_to_page(pfn); if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { - __flush_dcache_page(page); + __flush_dcache_area(page_address(page), PAGE_SIZE); __flush_icache_all(); } else if (icache_is_aivivt()) { __flush_icache_all(); @@ -94,28 +87,14 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr) } /* - * Ensure cache coherency between kernel mapping and userspace mapping of this - * page. + * This function is called when a page has been modified by the kernel. Mark + * it as dirty for later flushing when mapped in user space (if executable, + * see __sync_icache_dcache). */ void flush_dcache_page(struct page *page) { - struct address_space *mapping; - - /* - * The zero page is never written to, so never has any dirty cache - * lines, and therefore never needs to be flushed. - */ - if (page == ZERO_PAGE(0)) - return; - - mapping = page_mapping(page); - if (mapping && mapping_mapped(mapping)) { - __flush_dcache_page(page); - __flush_icache_all(); - set_bit(PG_dcache_clean, &page->flags); - } else { + if (test_bit(PG_dcache_clean, &page->flags)) clear_bit(PG_dcache_clean, &page->flags); - } } EXPORT_SYMBOL(flush_dcache_page); diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c new file mode 100644 index 000000000000..2fc8258bab2d --- /dev/null +++ b/arch/arm64/mm/hugetlbpage.c @@ -0,0 +1,70 @@ +/* + * arch/arm64/mm/hugetlbpage.c + * + * Copyright (C) 2013 Linaro Ltd. + * + * Based on arch/x86/mm/hugetlbpage.c. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/pagemap.h> +#include <linux/err.h> +#include <linux/sysctl.h> +#include <asm/mman.h> +#include <asm/tlb.h> +#include <asm/tlbflush.h> +#include <asm/pgalloc.h> + +#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +{ + return 0; +} +#endif + +struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, + int write) +{ + return ERR_PTR(-EINVAL); +} + +int pmd_huge(pmd_t pmd) +{ + return !(pmd_val(pmd) & PMD_TABLE_BIT); +} + +int pud_huge(pud_t pud) +{ + return !(pud_val(pud) & PUD_TABLE_BIT); +} + +static __init int setup_hugepagesz(char *opt) +{ + unsigned long ps = memparse(opt, &opt); + if (ps == PMD_SIZE) { + hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); + } else if (ps == PUD_SIZE) { + hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); + } else { + pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20); + return 0; + } + return 1; +} +__setup("hugepagesz=", setup_hugepagesz); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f497ca77925a..67e8d7ce3fe7 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -197,14 +197,6 @@ void __init bootmem_init(void) max_pfn = max_low_pfn = max; } -/* - * Poison init memory with an undefined instruction (0x0). - */ -static inline void poison_init_mem(void *s, size_t count) -{ - memset(s, 0, count); -} - #ifndef CONFIG_SPARSEMEM_VMEMMAP static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) { @@ -280,59 +272,17 @@ static void __init free_unused_memmap(void) */ void __init mem_init(void) { - unsigned long reserved_pages, free_pages; - struct memblock_region *reg; - arm64_swiotlb_init(); max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; #ifndef CONFIG_SPARSEMEM_VMEMMAP - /* this will put all unused low memory onto the freelists */ free_unused_memmap(); #endif + /* this will put all unused low memory onto the freelists */ + free_all_bootmem(); - totalram_pages += free_all_bootmem(); - - reserved_pages = free_pages = 0; - - for_each_memblock(memory, reg) { - unsigned int pfn1, pfn2; - struct page *page, *end; - - pfn1 = __phys_to_pfn(reg->base); - pfn2 = pfn1 + __phys_to_pfn(reg->size); - - page = pfn_to_page(pfn1); - end = pfn_to_page(pfn2 - 1) + 1; - - do { - if (PageReserved(page)) - reserved_pages++; - else if (!page_count(page)) - free_pages++; - page++; - } while (page < end); - } - - /* - * Since our memory may not be contiguous, calculate the real number - * of pages we have in this system. 
- */ - pr_info("Memory:"); - num_physpages = 0; - for_each_memblock(memory, reg) { - unsigned long pages = memblock_region_memory_end_pfn(reg) - - memblock_region_memory_base_pfn(reg); - num_physpages += pages; - printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); - } - printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); - - pr_notice("Memory: %luk/%luk available, %luk reserved\n", - nr_free_pages() << (PAGE_SHIFT-10), - free_pages << (PAGE_SHIFT-10), - reserved_pages << (PAGE_SHIFT-10)); + mem_init_print_info(NULL); #define MLK(b, t) b, t, ((t) - (b)) >> 10 #define MLM(b, t) b, t, ((t) - (b)) >> 20 @@ -374,7 +324,7 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR); BUG_ON(TASK_SIZE_64 > MODULES_VADDR); - if (PAGE_SIZE >= 16384 && num_physpages <= 128) { + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /* * On a machine this small we won't get anywhere without @@ -386,7 +336,6 @@ void __init mem_init(void) void free_initmem(void) { - poison_init_mem(__init_begin, __init_end - __init_begin); free_initmem_default(0); } @@ -396,10 +345,8 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { - if (!keep_initrd) { - poison_init_mem((void *)start, PAGE_ALIGN(end) - start); - free_reserved_area(start, end, 0, "initrd"); - } + if (!keep_initrd) + free_reserved_area((void *)start, (void *)end, 0, "initrd"); } static int __init keepinitrd_setup(char *__unused) diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h index 916701e6d040..d519f4f50c8c 100644 --- a/arch/arm64/mm/mm.h +++ b/arch/arm64/mm/mm.h @@ -1,3 +1,2 @@ -extern void __flush_dcache_page(struct page *page); extern void __init bootmem_init(void); extern void __init arm64_swiotlb_init(void); diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 7c7be7855638..8ed6cb1a900f 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -90,11 +90,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index eeecc9c8ed68..a8d1059b91b2 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -297,6 +297,16 @@ static void __init map_mem(void) { struct memblock_region *reg; + /* + * Temporarily limit the memblock range. We need to do this as + * create_mapping requires puds, pmds and ptes to be allocated from + * memory addressable from the initial direct kernel mapping. + * + * The initial direct kernel mapping, located at swapper_pg_dir, + * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (aligned). + */ + memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE); + /* map all the memory banks */ for_each_memblock(memory, reg) { phys_addr_t start = reg->base; @@ -307,6 +317,9 @@ static void __init map_mem(void) create_mapping(start, __phys_to_virt(start), end - start); } + + /* Limit no longer required. */ + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); } /* @@ -317,12 +330,6 @@ void __init paging_init(void) { void *zero_page; - /* - * Maximum PGDIR_SIZE addressable via the initial direct kernel - * mapping in swapper_pg_dir. 
- */ - memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE); - init_mem_pgprot(); map_mem(); @@ -339,7 +346,6 @@ void __init paging_init(void) bootmem_init(); empty_zero_page = virt_to_page(zero_page); - __flush_dcache_page(empty_zero_page); /* * TTBR0 is only used for the identity mapping at this stage. Make it diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile new file mode 100644 index 000000000000..be240404ba96 --- /dev/null +++ b/arch/arm64/xen/Makefile @@ -0,0 +1,2 @@ +xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o) +obj-y := xen-arm.o hypercall.o diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S new file mode 100644 index 000000000000..531342ec4bcf --- /dev/null +++ b/arch/arm64/xen/hypercall.S @@ -0,0 +1,93 @@ +/****************************************************************************** + * hypercall.S + * + * Xen hypercall wrappers + * + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* + * The Xen hypercall calling convention is very similar to the procedure + * call standard for the ARM 64-bit architecture: the first parameter is + * passed in x0, the second in x1, the third in x2, the fourth in x3 and + * the fifth in x4. + * + * The hypercall number is passed in x16. + * + * The return value is in x0. + * + * The hvc ISS is required to be 0xEA1, that is the Xen specific ARM + * hypercall tag. + * + * Parameter structs passed to hypercalls are laid out according to + * the ARM 64-bit EABI standard. 
+ */ + +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <xen/interface/xen.h> + + +#define XEN_IMM 0xEA1 + +#define HYPERCALL_SIMPLE(hypercall) \ +ENTRY(HYPERVISOR_##hypercall) \ + mov x16, #__HYPERVISOR_##hypercall; \ + hvc XEN_IMM; \ + ret; \ +ENDPROC(HYPERVISOR_##hypercall) + +#define HYPERCALL0 HYPERCALL_SIMPLE +#define HYPERCALL1 HYPERCALL_SIMPLE +#define HYPERCALL2 HYPERCALL_SIMPLE +#define HYPERCALL3 HYPERCALL_SIMPLE +#define HYPERCALL4 HYPERCALL_SIMPLE +#define HYPERCALL5 HYPERCALL_SIMPLE + + .text + +HYPERCALL2(xen_version); +HYPERCALL3(console_io); +HYPERCALL3(grant_table_op); +HYPERCALL2(sched_op); +HYPERCALL2(event_channel_op); +HYPERCALL2(hvm_op); +HYPERCALL2(memory_op); +HYPERCALL2(physdev_op); +HYPERCALL3(vcpu_op); +HYPERCALL1(tmem_op); + +ENTRY(privcmd_call) + mov x16, x0 + mov x0, x1 + mov x1, x2 + mov x2, x3 + mov x3, x4 + mov x4, x5 + hvc XEN_IMM + ret +ENDPROC(privcmd_call); diff --git a/arch/arm/include/asm/kvm_arch_timer.h b/include/kvm/arm_arch_timer.h index 68cb9e1dfb81..6d9aeddc09bf 100644 --- a/arch/arm/include/asm/kvm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -61,6 +61,8 @@ struct arch_timer_cpu { #ifdef CONFIG_KVM_ARM_TIMER int kvm_timer_hyp_init(void); int kvm_timer_init(struct kvm *kvm); +void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, + const struct kvm_irq_level *irq); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); @@ -76,6 +78,8 @@ static inline int kvm_timer_init(struct kvm *kvm) return 0; } +static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, + const struct kvm_irq_level *irq) {} static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {} static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {} static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {} diff --git a/arch/arm/include/asm/kvm_vgic.h b/include/kvm/arm_vgic.h index 343744e4809c..343744e4809c 100644 --- a/arch/arm/include/asm/kvm_vgic.h +++ b/include/kvm/arm_vgic.h diff --git a/arch/arm/kernel/sched_clock.c b/kernel/time/sched_clock.c index e8edcaa0e432..a326f27d7f09 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -13,8 +13,7 @@ #include <linux/sched.h> #include <linux/syscore_ops.h> #include <linux/timer.h> - -#include <asm/sched_clock.h> +#include <linux/sched_clock.h> struct clock_data { u64 epoch_ns; @@ -24,7 +23,6 @@ struct clock_data { u32 mult; u32 shift; bool suspended; - bool needs_suspend; }; static void sched_clock_poll(unsigned long wrap_ticks); @@ -51,10 +49,11 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) return (cyc * mult) >> shift; } -static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) +static unsigned long long notrace sched_clock_32(void) { u64 epoch_ns; u32 epoch_cyc; + u32 cyc; if (cd.suspended) return cd.epoch_ns; @@ -73,7 +72,9 @@ static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask) smp_rmb(); } while (epoch_cyc != cd.epoch_cyc_copy); - return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift); + cyc = read_sched_clock(); + cyc = (cyc - epoch_cyc) & sched_clock_mask; + return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift); } /* @@ -165,12 +166,6 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) pr_debug("Registered %pF as sched_clock source\n", read); } -static unsigned long long notrace sched_clock_32(void) -{ - u32 cyc = read_sched_clock(); - return 
cyc_to_sched_clock(cyc, sched_clock_mask); -} - unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32; unsigned long long notrace sched_clock(void) diff --git a/arch/arm/kvm/arch_timer.c b/virt/kvm/arm/arch_timer.c index c55b6089e923..c2e1ef4604e8 100644 --- a/arch/arm/kvm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -25,14 +25,12 @@ #include <clocksource/arm_arch_timer.h> #include <asm/arch_timer.h> -#include <asm/kvm_vgic.h> -#include <asm/kvm_arch_timer.h> +#include <kvm/arm_vgic.h> +#include <kvm/arm_arch_timer.h> static struct timecounter *timecounter; static struct workqueue_struct *wqueue; -static struct kvm_irq_level timer_irq = { - .level = 1, -}; +static unsigned int host_vtimer_irq; static cycle_t kvm_phys_timer_read(void) { @@ -67,8 +65,8 @@ static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu) timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK; kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, - vcpu->arch.timer_cpu.irq->irq, - vcpu->arch.timer_cpu.irq->level); + timer->irq->irq, + timer->irq->level); } static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) @@ -156,6 +154,20 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) timer_arm(timer, ns); } +void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, + const struct kvm_irq_level *irq) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + /* + * The vcpu timer irq number cannot be determined in + * kvm_timer_vcpu_init() because it is called much before + * kvm_vcpu_set_target(). To handle this, we determine + * vcpu timer irq number when the vcpu is reset. + */ + timer->irq = irq; +} + void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; @@ -163,12 +175,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); timer->timer.function = kvm_timer_expire; - timer->irq = &timer_irq; } static void kvm_timer_init_interrupt(void *info) { - enable_percpu_irq(timer_irq.irq, 0); + enable_percpu_irq(host_vtimer_irq, 0); } @@ -182,7 +193,7 @@ static int kvm_timer_cpu_notify(struct notifier_block *self, break; case CPU_DYING: case CPU_DYING_FROZEN: - disable_percpu_irq(timer_irq.irq); + disable_percpu_irq(host_vtimer_irq); break; } @@ -195,6 +206,7 @@ static struct notifier_block kvm_timer_cpu_nb = { static const struct of_device_id arch_timer_of_match[] = { { .compatible = "arm,armv7-timer", }, + { .compatible = "arm,armv8-timer", }, {}, }; @@ -229,7 +241,7 @@ int kvm_timer_hyp_init(void) goto out; } - timer_irq.irq = ppi; + host_vtimer_irq = ppi; err = register_cpu_notifier(&kvm_timer_cpu_nb); if (err) { diff --git a/arch/arm/kvm/vgic.c b/virt/kvm/arm/vgic.c index 17c5ac7d10ed..17c5ac7d10ed 100644 --- a/arch/arm/kvm/vgic.c +++ b/virt/kvm/arm/vgic.c |