Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile          |  11
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c      |   9
-rw-r--r--  arch/arm64/kernel/early_printk.c    | 156
-rw-r--r--  arch/arm64/kernel/efi-entry.S       | 108
-rw-r--r--  arch/arm64/kernel/efi-stub.c        |  81
-rw-r--r--  arch/arm64/kernel/efi.c             | 469
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S    |  24
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S    | 218
-rw-r--r--  arch/arm64/kernel/entry.S           |  91
-rw-r--r--  arch/arm64/kernel/fpsimd.c          | 186
-rw-r--r--  arch/arm64/kernel/ftrace.c          | 176
-rw-r--r--  arch/arm64/kernel/head.S            | 123
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c   |   2
-rw-r--r--  arch/arm64/kernel/process.c         |  49
-rw-r--r--  arch/arm64/kernel/psci.c            | 231
-rw-r--r--  arch/arm64/kernel/ptrace.c          |  92
-rw-r--r--  arch/arm64/kernel/return_address.c  |  55
-rw-r--r--  arch/arm64/kernel/setup.c           |  21
-rw-r--r--  arch/arm64/kernel/signal.c          |  52
-rw-r--r--  arch/arm64/kernel/signal32.c        |  16
-rw-r--r--  arch/arm64/kernel/smp.c             |  41
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c  |  39
-rw-r--r--  arch/arm64/kernel/stacktrace.c      |   2
-rw-r--r--  arch/arm64/kernel/time.c            |   3
-rw-r--r--  arch/arm64/kernel/topology.c        | 212
-rw-r--r--  arch/arm64/kernel/traps.c           |   7
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S     |   2
27 files changed, 2080 insertions(+), 396 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7d811d9522bc..cdaedad3afe5 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -4,24 +4,31 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) \
+ -I$(src)/../../../scripts/dtc/libfdt
+
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o cpu_ops.o insn.o
+ hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
+arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
-arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
+arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 338b568cd8ae..a85843ddbde8 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -44,10 +44,15 @@ EXPORT_SYMBOL(memstart_addr);
/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(memcmp);
/* atomic bitops */
EXPORT_SYMBOL(set_bit);
@@ -56,3 +61,7 @@ EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(test_and_change_bit);
+
+#ifdef CONFIG_FUNCTION_TRACER
+EXPORT_SYMBOL(_mcount);
+#endif
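
With CONFIG_FUNCTION_TRACER enabled, the compiler inserts an implicit call to
_mcount at the start of every traceable function -- including functions in
loadable modules, which would fail to resolve the symbol at load time without
the export added above. A minimal sketch (the module and demo_func are
hypothetical, not part of this patch):

#include <linux/module.h>
#include <linux/init.h>

static noinline int demo_func(int x)
{
	/* With -pg, the compiler prepends roughly:
	 *	mov	x0, x30
	 *	bl	_mcount		// resolved via EXPORT_SYMBOL(_mcount)
	 */
	return x + 1;
}

static int __init demo_init(void)
{
	pr_info("demo: %d\n", demo_func(41));
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");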
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
deleted file mode 100644
index 2dc36d00addf..000000000000
--- a/arch/arm64/kernel/early_printk.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Earlyprintk support.
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/kernel.h>
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-
-#include <linux/amba/serial.h>
-#include <linux/serial_reg.h>
-
-#include <asm/fixmap.h>
-
-static void __iomem *early_base;
-static void (*printch)(char ch);
-
-/*
- * PL011 single character TX.
- */
-static void pl011_printch(char ch)
-{
- while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF)
- ;
- writeb_relaxed(ch, early_base + UART01x_DR);
- while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY)
- ;
-}
-
-/*
- * Semihosting-based debug console
- */
-static void smh_printch(char ch)
-{
- asm volatile("mov x1, %0\n"
- "mov x0, #3\n"
- "hlt 0xf000\n"
- : : "r" (&ch) : "x0", "x1", "memory");
-}
-
-/*
- * 8250/16550 (8-bit aligned registers) single character TX.
- */
-static void uart8250_8bit_printch(char ch)
-{
- while (!(readb_relaxed(early_base + UART_LSR) & UART_LSR_THRE))
- ;
- writeb_relaxed(ch, early_base + UART_TX);
-}
-
-/*
- * 8250/16550 (32-bit aligned registers) single character TX.
- */
-static void uart8250_32bit_printch(char ch)
-{
- while (!(readl_relaxed(early_base + (UART_LSR << 2)) & UART_LSR_THRE))
- ;
- writel_relaxed(ch, early_base + (UART_TX << 2));
-}
-
-struct earlycon_match {
- const char *name;
- void (*printch)(char ch);
-};
-
-static const struct earlycon_match earlycon_match[] __initconst = {
- { .name = "pl011", .printch = pl011_printch, },
- { .name = "smh", .printch = smh_printch, },
- { .name = "uart8250-8bit", .printch = uart8250_8bit_printch, },
- { .name = "uart8250-32bit", .printch = uart8250_32bit_printch, },
- {}
-};
-
-static void early_write(struct console *con, const char *s, unsigned n)
-{
- while (n-- > 0) {
- if (*s == '\n')
- printch('\r');
- printch(*s);
- s++;
- }
-}
-
-static struct console early_console_dev = {
- .name = "earlycon",
- .write = early_write,
- .flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1,
-};
-
-/*
- * Parse earlyprintk=... parameter in the format:
- *
- * <name>[,<addr>][,<options>]
- *
- * and register the early console. It is assumed that the UART has been
- * initialised by the bootloader already.
- */
-static int __init setup_early_printk(char *buf)
-{
- const struct earlycon_match *match = earlycon_match;
- phys_addr_t paddr = 0;
-
- if (!buf) {
- pr_warning("No earlyprintk arguments passed.\n");
- return 0;
- }
-
- while (match->name) {
- size_t len = strlen(match->name);
- if (!strncmp(buf, match->name, len)) {
- buf += len;
- break;
- }
- match++;
- }
- if (!match->name) {
- pr_warning("Unknown earlyprintk arguments: %s\n", buf);
- return 0;
- }
-
- /* I/O address */
- if (!strncmp(buf, ",0x", 3)) {
- char *e;
- paddr = simple_strtoul(buf + 1, &e, 16);
- buf = e;
- }
- /* no options parsing yet */
-
- if (paddr)
- early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
-
- printch = match->printch;
- early_console = &early_console_dev;
- register_console(&early_console_dev);
-
- return 0;
-}
-
-early_param("earlyprintk", setup_early_printk);
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
new file mode 100644
index 000000000000..619b1dd7bcde
--- /dev/null
+++ b/arch/arm64/kernel/efi-entry.S
@@ -0,0 +1,108 @@
+/*
+ * EFI entry point.
+ *
+ * Copyright (C) 2013, 2014 Red Hat, Inc.
+ * Author: Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+
+#define EFI_LOAD_ERROR 0x8000000000000001
+
+ __INIT
+
+ /*
+ * We arrive here from the EFI boot manager with:
+ *
+ * * CPU in little-endian mode
+ * * MMU on with identity-mapped RAM
+ * * Icache and Dcache on
+ *
+ * We will most likely be running from some place other than where
+ * we want to be. The kernel image wants to be placed at TEXT_OFFSET
+ * from start of RAM.
+ */
+ENTRY(efi_stub_entry)
+ /*
+ * Create a stack frame to save FP/LR with extra space
+ * for image_addr variable passed to efi_entry().
+ */
+ stp x29, x30, [sp, #-32]!
+
+ /*
+ * Call efi_entry to do the real work.
+ * x0 and x1 are already set up by firmware. Current runtime
+ * address of image is calculated and passed via *image_addr.
+ *
+ * unsigned long efi_entry(void *handle,
+ * efi_system_table_t *sys_table,
+ * unsigned long *image_addr) ;
+ */
+ adrp x8, _text
+ add x8, x8, #:lo12:_text
+ add x2, sp, 16
+ str x8, [x2]
+ bl efi_entry
+ cmn x0, #1
+ b.eq efi_load_fail
+
+ /*
+ * efi_entry() will have relocated the kernel image if necessary
+ * and we return here with device tree address in x0 and the kernel
+ * entry point stored at *image_addr. Save those values in registers
+ * which are callee preserved.
+ */
+ mov x20, x0 // DTB address
+ ldr x0, [sp, #16] // relocated _text address
+ mov x21, x0
+
+ /*
+ * Flush dcache covering current runtime addresses
+ * of kernel text/data. Then flush all of icache.
+ */
+ adrp x1, _text
+ add x1, x1, #:lo12:_text
+ adrp x2, _edata
+ add x2, x2, #:lo12:_edata
+ sub x1, x2, x1
+
+ bl __flush_dcache_area
+ ic ialluis
+
+ /* Turn off Dcache and MMU */
+ mrs x0, CurrentEL
+ cmp x0, #CurrentEL_EL2
+ b.ne 1f
+ mrs x0, sctlr_el2
+ bic x0, x0, #1 << 0 // clear SCTLR.M
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ msr sctlr_el2, x0
+ isb
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ bic x0, x0, #1 << 0 // clear SCTLR.M
+ bic x0, x0, #1 << 2 // clear SCTLR.C
+ msr sctlr_el1, x0
+ isb
+2:
+ /* Jump to kernel entry point */
+ mov x0, x20
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ br x21
+
+efi_load_fail:
+ mov x0, #EFI_LOAD_ERROR
+ ldp x29, x30, [sp], #32
+ ret
+
+ENDPROC(efi_stub_entry)
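
The control flow of efi_stub_entry, rendered as C for readability. This is
illustrative only -- the real code must be assembly because it controls
register, cache and MMU state directly; call_stub and kernel_entry_t are names
invented here:

#include <linux/efi.h>
#include <linux/compiler.h>
#include <asm/sections.h>

/* prototype from the comment above; implemented by the shared EFI stub */
unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
			unsigned long *image_addr);

typedef void (*kernel_entry_t)(u64 dtb, u64 res0, u64 res1, u64 res2);

static unsigned long call_stub(void *handle, efi_system_table_t *sys_table)
{
	/* x8 = current runtime address of _text, stored at *image_addr */
	unsigned long image_addr = (unsigned long)_text;
	unsigned long fdt = efi_entry(handle, sys_table, &image_addr);

	if (fdt == ~0UL)			/* cmn x0, #1; b.eq efi_load_fail */
		return 0x8000000000000001UL;	/* EFI_LOAD_ERROR */

	/*
	 * The assembly then flushes the D-cache over [_text, _edata),
	 * invalidates the I-cache, turns off the MMU and D-cache, and
	 * enters the (possibly relocated) kernel with the DTB in x0:
	 */
	((kernel_entry_t)image_addr)(fdt, 0, 0, 0);
	unreachable();
}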
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
new file mode 100644
index 000000000000..60e98a639ac5
--- /dev/null
+++ b/arch/arm64/kernel/efi-stub.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
+ *
+ * This file implements the EFI boot stub for the arm64 kernel.
+ * Adapted from ARM version by Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+#include <asm/sections.h>
+#include <generated/compile.h>
+#include <generated/utsrelease.h>
+
+/*
+ * AArch64 requires the DTB to be 8-byte aligned, placed within the first
+ * 512 MiB from the start of the kernel, and it may not cross a 2 MiB
+ * boundary. We set the alignment to 2 MiB so we know it won't cross one.
+ */
+#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */
+#define MAX_FDT_OFFSET SZ_512M
+
+#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+
+static void efi_char16_printk(efi_system_table_t *sys_table_arg,
+ efi_char16_t *str);
+
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table,
+ void *__image, void **__fh);
+static efi_status_t efi_file_close(void *handle);
+
+static efi_status_t
+efi_file_read(void *handle, unsigned long *size, void *addr);
+
+static efi_status_t
+efi_file_size(efi_system_table_t *sys_table, void *__fh,
+ efi_char16_t *filename_16, void **handle, u64 *file_sz);
+
+/* Include shared EFI stub code */
+#include "../../../drivers/firmware/efi/efi-stub-helper.c"
+#include "../../../drivers/firmware/efi/fdt.c"
+#include "../../../drivers/firmware/efi/arm-stub.c"
+
+
+static efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
+{
+ efi_status_t status;
+ unsigned long kernel_size, kernel_memsize = 0;
+
+ /* Relocate the image, if required. */
+ kernel_size = _edata - _text;
+ if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ kernel_memsize = kernel_size + (_end - _edata);
+ status = efi_relocate_kernel(sys_table, image_addr,
+ kernel_size, kernel_memsize,
+ dram_base + TEXT_OFFSET,
+ PAGE_SIZE);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ return status;
+ }
+ if (*image_addr != (dram_base + TEXT_OFFSET)) {
+ pr_efi_err(sys_table, "Failed to alloc kernel memory\n");
+ efi_free(sys_table, kernel_memsize, *image_addr);
+ return EFI_LOAD_ERROR;
+ }
+ *image_size = kernel_memsize;
+ }
+
+
+ return EFI_SUCCESS;
+}
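
The EFI_FDT_ALIGN choice above encodes a small invariant: a blob that starts
on a 2 MiB boundary and is no larger than 2 MiB cannot cross the next 2 MiB
boundary. A sketch of the resulting placement rule (fdt_placement_ok is a name
invented here; it assumes the DTB itself is at most 2 MiB):

#include <linux/kernel.h>
#include <linux/sizes.h>

static bool fdt_placement_ok(u64 fdt_base, u64 fdt_size, u64 kernel_base)
{
	return IS_ALIGNED(fdt_base, SZ_2M) &&	 /* EFI_FDT_ALIGN */
	       fdt_size <= SZ_2M &&		 /* so no 2 MiB boundary is crossed */
	       fdt_base - kernel_base < SZ_512M; /* MAX_FDT_OFFSET */
}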
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
new file mode 100644
index 000000000000..14db1f6e8d7f
--- /dev/null
+++ b/arch/arm64/kernel/efi.c
@@ -0,0 +1,469 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <linux/bootmem.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+
+struct efi_memory_map memmap;
+
+static efi_runtime_services_t *runtime;
+
+static u64 efi_system_table;
+
+static int uefi_debug __initdata;
+static int __init uefi_debug_setup(char *str)
+{
+ uefi_debug = 1;
+
+ return 0;
+}
+early_param("uefi_debug", uefi_debug_setup);
+
+static int __init is_normal_ram(efi_memory_desc_t *md)
+{
+ if (md->attribute & EFI_MEMORY_WB)
+ return 1;
+ return 0;
+}
+
+static void __init efi_setup_idmap(void)
+{
+ struct memblock_region *r;
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ for_each_memblock(memory, r)
+ create_id_mapping(r->base, r->size, 0);
+
+ /* map runtime io spaces */
+ for_each_efi_memory_desc(&memmap, md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
+ continue;
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+ create_id_mapping(paddr, size, 1);
+ }
+}
+
+static int __init uefi_init(void)
+{
+ efi_char16_t *c16;
+ char vendor[100] = "unknown";
+ int i, retval;
+
+ efi.systab = early_memremap(efi_system_table,
+ sizeof(efi_system_table_t));
+ if (efi.systab == NULL) {
+ pr_warn("Unable to map EFI system table.\n");
+ return -ENOMEM;
+ }
+
+ set_bit(EFI_BOOT, &efi.flags);
+ set_bit(EFI_64BIT, &efi.flags);
+
+ /*
+ * Verify the EFI Table
+ */
+ if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ pr_err("System table signature incorrect\n");
+ return -EINVAL;
+ }
+ if ((efi.systab->hdr.revision >> 16) < 2)
+ pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff);
+
+ /* Show what we know for posterity */
+ c16 = early_memremap(efi.systab->fw_vendor,
+ sizeof(vendor));
+ if (c16) {
+ for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
+ vendor[i] = c16[i];
+ vendor[i] = '\0';
+ }
+
+ pr_info("EFI v%u.%.02u by %s\n",
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff, vendor);
+
+ retval = efi_config_init(NULL);
+ if (retval == 0)
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
+ early_memunmap(c16, sizeof(vendor));
+ early_memunmap(efi.systab, sizeof(efi_system_table_t));
+
+ return retval;
+}
+
+static __initdata char memory_type_name[][32] = {
+ {"Reserved"},
+ {"Loader Code"},
+ {"Loader Data"},
+ {"Boot Code"},
+ {"Boot Data"},
+ {"Runtime Code"},
+ {"Runtime Data"},
+ {"Conventional Memory"},
+ {"Unusable Memory"},
+ {"ACPI Reclaim Memory"},
+ {"ACPI Memory NVS"},
+ {"Memory Mapped I/O"},
+ {"MMIO Port Space"},
+ {"PAL Code"},
+};
+
+/*
+ * Return true for RAM regions we want to permanently reserve.
+ */
+static __init int is_reserve_region(efi_memory_desc_t *md)
+{
+ if (!is_normal_ram(md))
+ return 0;
+
+ if (md->attribute & EFI_MEMORY_RUNTIME)
+ return 1;
+
+ if (md->type == EFI_ACPI_RECLAIM_MEMORY ||
+ md->type == EFI_RESERVED_TYPE)
+ return 1;
+
+ return 0;
+}
+
+static __init void reserve_regions(void)
+{
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ if (uefi_debug)
+ pr_info("Processing EFI memory map:\n");
+
+ for_each_efi_memory_desc(&memmap, md) {
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+
+ if (uefi_debug)
+ pr_info(" 0x%012llx-0x%012llx [%s]",
+ paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+ memory_type_name[md->type]);
+
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_normal_ram(md))
+ early_init_dt_add_memory_arch(paddr, size);
+
+ if (is_reserve_region(md) ||
+ md->type == EFI_BOOT_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_DATA) {
+ memblock_reserve(paddr, size);
+ if (uefi_debug)
+ pr_cont("*");
+ }
+
+ if (uefi_debug)
+ pr_cont("\n");
+ }
+}
+
+
+static u64 __init free_one_region(u64 start, u64 end)
+{
+ u64 size = end - start;
+
+ if (uefi_debug)
+ pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
+
+ free_bootmem_late(start, size);
+ return size;
+}
+
+static u64 __init free_region(u64 start, u64 end)
+{
+ u64 map_start, map_end, total = 0;
+
+ if (end <= start)
+ return total;
+
+ map_start = (u64)memmap.phys_map;
+ map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
+ map_start &= PAGE_MASK;
+
+ if (start < map_end && end > map_start) {
+ /* region overlaps UEFI memmap */
+ if (start < map_start)
+ total += free_one_region(start, map_start);
+
+ if (map_end < end)
+ total += free_one_region(map_end, end);
+ } else
+ total += free_one_region(start, end);
+
+ return total;
+}
+
+static void __init free_boot_services(void)
+{
+ u64 total_freed = 0;
+ u64 keep_end, free_start, free_end;
+ efi_memory_desc_t *md;
+
+ /*
+ * If the kernel uses larger pages than UEFI, we have to be careful
+ * not to inadvertently free memory we want to keep if there is
+ * overlap at the kernel page size alignment. We do not want to
+ * free is_reserve_region() memory, nor the UEFI memmap itself.
+ *
+ * The memory map is sorted, so we keep track of the end of
+ * any previous region we want to keep, remember any region
+ * we want to free and defer freeing it until we encounter
+ * the next region we want to keep. This way, before freeing
+ * it, we can clip it as needed to avoid freeing memory we
+ * want to keep for UEFI.
+ */
+
+ keep_end = 0;
+ free_start = 0;
+
+ for_each_efi_memory_desc(&memmap, md) {
+ u64 paddr, npages, size;
+
+ if (is_reserve_region(md)) {
+ /*
+ * We don't want to free any memory from this region.
+ */
+ if (free_start) {
+ /* adjust free_end then free region */
+ if (free_end > md->phys_addr)
+ free_end -= PAGE_SIZE;
+ total_freed += free_region(free_start, free_end);
+ free_start = 0;
+ }
+ keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+ continue;
+ }
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA) {
+ /* no need to free this region */
+ continue;
+ }
+
+ /*
+ * We want to free memory from this region.
+ */
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (free_start) {
+ if (paddr <= free_end)
+ free_end = paddr + size;
+ else {
+ total_freed += free_region(free_start, free_end);
+ free_start = paddr;
+ free_end = paddr + size;
+ }
+ } else {
+ free_start = paddr;
+ free_end = paddr + size;
+ }
+ if (free_start < keep_end) {
+ free_start += PAGE_SIZE;
+ if (free_start >= free_end)
+ free_start = 0;
+ }
+ }
+ if (free_start)
+ total_freed += free_region(free_start, free_end);
+
+ if (total_freed)
+ pr_info("Freed 0x%llx bytes of EFI boot services memory",
+ total_freed);
+}
+
+void __init efi_init(void)
+{
+ struct efi_fdt_params params;
+
+ /* Grab UEFI information placed in FDT by stub */
+ if (!efi_get_fdt_params(&params, uefi_debug))
+ return;
+
+ efi_system_table = params.system_table;
+
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
+ memmap.phys_map = (void *)params.mmap;
+ memmap.map = early_memremap(params.mmap, params.mmap_size);
+ memmap.map_end = memmap.map + params.mmap_size;
+ memmap.desc_size = params.desc_size;
+ memmap.desc_version = params.desc_ver;
+
+ if (uefi_init() < 0)
+ return;
+
+ reserve_regions();
+}
+
+void __init efi_idmap_init(void)
+{
+ if (!efi_enabled(EFI_BOOT))
+ return;
+
+ /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
+ efi_setup_idmap();
+}
+
+static int __init remap_region(efi_memory_desc_t *md, void **new)
+{
+ u64 paddr, vaddr, npages, size;
+
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_normal_ram(md))
+ vaddr = (__force u64)ioremap_cache(paddr, size);
+ else
+ vaddr = (__force u64)ioremap(paddr, size);
+
+ if (!vaddr) {
+ pr_err("Unable to remap 0x%llx pages @ %p\n",
+ npages, (void *)paddr);
+ return 0;
+ }
+
+ /* adjust for any rounding when the EFI and system page sizes differ */
+ md->virt_addr = vaddr + (md->phys_addr - paddr);
+
+ if (uefi_debug)
+ pr_info(" EFI remap 0x%012llx => %p\n",
+ md->phys_addr, (void *)md->virt_addr);
+
+ memcpy(*new, md, memmap.desc_size);
+ *new += memmap.desc_size;
+
+ return 1;
+}
+
+/*
+ * Switch UEFI from an identity map to a kernel virtual map
+ */
+static int __init arm64_enter_virtual_mode(void)
+{
+ efi_memory_desc_t *md;
+ phys_addr_t virtmap_phys;
+ void *virtmap, *virt_md;
+ efi_status_t status;
+ u64 mapsize;
+ int count = 0;
+ unsigned long flags;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return -1;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ /* replace early memmap mapping with permanent mapping */
+ mapsize = memmap.map_end - memmap.map;
+ early_memunmap(memmap.map, mapsize);
+ memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
+ mapsize);
+ memmap.map_end = memmap.map + mapsize;
+
+ efi.memmap = &memmap;
+
+ /* Map the runtime regions */
+ virtmap = kmalloc(mapsize, GFP_KERNEL);
+ if (!virtmap) {
+ pr_err("Failed to allocate EFI virtual memmap\n");
+ return -1;
+ }
+ virtmap_phys = virt_to_phys(virtmap);
+ virt_md = virtmap;
+
+ for_each_efi_memory_desc(&memmap, md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (remap_region(md, &virt_md))
+ ++count;
+ }
+
+ efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+ if (efi.systab)
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+ local_irq_save(flags);
+ cpu_switch_mm(idmap_pg_dir, &init_mm);
+
+ /* Call SetVirtualAddressMap with the physical address of the map */
+ runtime = efi.systab->runtime;
+ efi.set_virtual_address_map = runtime->set_virtual_address_map;
+
+ status = efi.set_virtual_address_map(count * memmap.desc_size,
+ memmap.desc_size,
+ memmap.desc_version,
+ (efi_memory_desc_t *)virtmap_phys);
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
+ local_irq_restore(flags);
+
+ kfree(virtmap);
+
+ free_boot_services();
+
+ if (status != EFI_SUCCESS) {
+ pr_err("Failed to set EFI virtual address map! [%lx]\n",
+ status);
+ return -1;
+ }
+
+ /* Set up runtime services function pointers */
+ runtime = efi.systab->runtime;
+ efi.get_time = runtime->get_time;
+ efi.set_time = runtime->set_time;
+ efi.get_wakeup_time = runtime->get_wakeup_time;
+ efi.set_wakeup_time = runtime->set_wakeup_time;
+ efi.get_variable = runtime->get_variable;
+ efi.get_next_variable = runtime->get_next_variable;
+ efi.set_variable = runtime->set_variable;
+ efi.query_variable_info = runtime->query_variable_info;
+ efi.update_capsule = runtime->update_capsule;
+ efi.query_capsule_caps = runtime->query_capsule_caps;
+ efi.get_next_high_mono_count = runtime->get_next_high_mono_count;
+ efi.reset_system = runtime->reset_system;
+
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(arm64_enter_virtual_mode);
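
Several functions above call memrange_efi_to_native() before reserving,
remapping or freeing memory: the UEFI memory map counts 4 KiB EFI pages
(EFI_PAGE_SHIFT == 12), while an arm64 kernel may run with 64 KiB pages. A
sketch of the conversion, with semantics inferred from its use here (the real
helper lives in include/linux/efi.h):

#include <linux/efi.h>
#include <linux/mm.h>

static void sketch_memrange_efi_to_native(u64 *addr, u64 *npages)
{
	u64 end = *addr + (*npages << EFI_PAGE_SHIFT);

	end = PAGE_ALIGN(end);			/* round the end up... */
	*addr &= PAGE_MASK;			/* ...and the start down */
	*npages = (end - *addr) >> PAGE_SHIFT;	/* now counted in native pages */
}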
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6a27cd6dbfa6..d358ccacfc00 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -41,3 +41,27 @@ ENTRY(fpsimd_load_state)
fpsimd_restore x0, 8
ret
ENDPROC(fpsimd_load_state)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Save the bottom n FP registers.
+ *
+ * x0 - pointer to struct fpsimd_partial_state
+ */
+ENTRY(fpsimd_save_partial_state)
+ fpsimd_save_partial x0, 1, 8, 9
+ ret
+ENDPROC(fpsimd_save_partial_state)
+
+/*
+ * Load the bottom n FP registers.
+ *
+ * x0 - pointer to struct fpsimd_partial_state
+ */
+ENTRY(fpsimd_load_partial_state)
+ fpsimd_restore_partial x0, 8, 9
+ ret
+ENDPROC(fpsimd_load_partial_state)
+
+#endif
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
new file mode 100644
index 000000000000..aa5f9fcbf9ee
--- /dev/null
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -0,0 +1,218 @@
+/*
+ * arch/arm64/kernel/entry-ftrace.S
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+/*
+ * Gcc with -pg will put the following code at the beginning of each function:
+ * mov x0, x30
+ * bl _mcount
+ * [function's body ...]
+ * "bl _mcount" may be replaced to "bl ftrace_caller" or NOP if dynamic
+ * ftrace is enabled.
+ *
+ * Note that x0 as an argument will not be used here, because we can get the
+ * lr (x30) of the instrumented function at any time by winding up the call
+ * stack, as long as the kernel is compiled without -fomit-frame-pointer
+ * (i.e. with CONFIG_FRAME_POINTER, which is forced on arm64).
+ *
+ * stack layout after mcount_enter in _mcount():
+ *
+ * current sp/fp => 0:+-----+
+ * in _mcount() | x29 | -> instrumented function's fp
+ * +-----+
+ * | x30 | -> _mcount()'s lr (= instrumented function's pc)
+ * old sp => +16:+-----+
+ * when instrumented | |
+ * function calls | ... |
+ * _mcount() | |
+ * | |
+ * instrumented => +xx:+-----+
+ * function's fp | x29 | -> parent's fp
+ * +-----+
+ * | x30 | -> instrumented function's lr (= parent's pc)
+ * +-----+
+ * | ... |
+ */
+
+ .macro mcount_enter
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ .endm
+
+ .macro mcount_exit
+ ldp x29, x30, [sp], #16
+ ret
+ .endm
+
+ .macro mcount_adjust_addr rd, rn
+ sub \rd, \rn, #AARCH64_INSN_SIZE
+ .endm
+
+ /* for instrumented function's parent */
+ .macro mcount_get_parent_fp reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg]
+ .endm
+
+ /* for instrumented function */
+ .macro mcount_get_pc0 reg
+ mcount_adjust_addr \reg, x30
+ .endm
+
+ .macro mcount_get_pc reg
+ ldr \reg, [x29, #8]
+ mcount_adjust_addr \reg, \reg
+ .endm
+
+ .macro mcount_get_lr reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg, #8]
+ mcount_adjust_addr \reg, \reg
+ .endm
+
+ .macro mcount_get_lr_addr reg
+ ldr \reg, [x29]
+ add \reg, \reg, #8
+ .endm
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+/*
+ * void _mcount(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function makes calls, if enabled, to:
+ * - tracer function to probe instrumented function's entry,
+ * - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(_mcount)
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ ldr x0, =ftrace_trace_stop
+ ldr x0, [x0] // if ftrace_trace_stop
+ ret // return;
+#endif
+ mcount_enter
+
+ ldr x0, =ftrace_trace_function
+ ldr x2, [x0]
+ adr x0, ftrace_stub
+ cmp x0, x2 // if (ftrace_trace_function
+ b.eq skip_ftrace_call // != ftrace_stub) {
+
+ mcount_get_pc x0 // function's pc
+ mcount_get_lr x1 // function's lr (= parent's pc)
+ blr x2 // (*ftrace_trace_function)(pc, lr);
+
+#ifndef CONFIG_FUNCTION_GRAPH_TRACER
+skip_ftrace_call: // return;
+ mcount_exit // }
+#else
+ mcount_exit // return;
+ // }
+skip_ftrace_call:
+ ldr x1, =ftrace_graph_return
+ ldr x2, [x1] // if ((ftrace_graph_return
+ cmp x0, x2 // != ftrace_stub)
+ b.ne ftrace_graph_caller
+
+ ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry
+ ldr x2, [x1] // != ftrace_graph_entry_stub))
+ ldr x0, =ftrace_graph_entry_stub
+ cmp x0, x2
+ b.ne ftrace_graph_caller // ftrace_graph_caller();
+
+ mcount_exit
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+ENDPROC(_mcount)
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+/*
+ * _mcount() is used to build the kernel with the -pg option, but all the
+ * branch instructions to _mcount() are replaced with NOPs initially at kernel
+ * start up, and later on, a NOP is turned into a branch to ftrace_caller()
+ * when tracing is enabled, or back into a NOP when it is disabled, on a
+ * per-function basis.
+ */
+ENTRY(_mcount)
+ ret
+ENDPROC(_mcount)
+
+/*
+ * void ftrace_caller(unsigned long return_address)
+ * @return_address: return address to instrumented function
+ *
+ * This function is a counterpart of _mcount() in 'static' ftrace, and
+ * makes calls to:
+ * - tracer function to probe instrumented function's entry,
+ * - ftrace_graph_caller to set up an exit hook
+ */
+ENTRY(ftrace_caller)
+ mcount_enter
+
+ mcount_get_pc0 x0 // function's pc
+ mcount_get_lr x1 // function's lr
+
+ .global ftrace_call
+ftrace_call: // tracer(pc, lr);
+ nop // This will be replaced with "bl xxx"
+ // where xxx can be any kind of tracer.
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .global ftrace_graph_call
+ftrace_graph_call: // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // "b ftrace_graph_caller"
+#endif
+
+ mcount_exit
+ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(ftrace_stub)
+ ret
+ENDPROC(ftrace_stub)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * void ftrace_graph_caller(void)
+ *
+ * Called from _mcount() or ftrace_caller() when function_graph tracer is
+ * selected.
+ * This function, together with prepare_ftrace_return(), fakes the link
+ * register's value on the call stack in order to intercept the instrumented
+ * function's return path and run return_to_handler() later on its exit.
+ */
+ENTRY(ftrace_graph_caller)
+ mcount_get_lr_addr x0 // pointer to function's saved lr
+ mcount_get_pc x1 // function's pc
+ mcount_get_parent_fp x2 // parent's fp
+ bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
+
+ mcount_exit
+ENDPROC(ftrace_graph_caller)
+
+/*
+ * void return_to_handler(void)
+ *
+ * Run ftrace_return_to_handler() before going back to parent.
+ * @fp is checked against the value passed by ftrace_graph_caller()
+ * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
+ */
+ENTRY(return_to_handler)
+ str x0, [sp, #-16]!
+ mov x0, x29 // parent's fp
+ bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
+ mov x30, x0 // restore the original return address
+ ldr x0, [sp], #16
+ ret
+END(return_to_handler)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
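
The mcount_get_* macros above are frame-record walks. A C model of the same
loads (struct frame_record and the function names are invented here; on
AArch64 each frame record is the pair {fp, lr} and x29 points at the current
one):

struct frame_record {
	struct frame_record *fp;	/* [x29]     : caller's frame record */
	unsigned long lr;		/* [x29, #8] : saved return address */
};

/* mcount_get_pc: the instrumented function's pc is _mcount()'s lr - 4 */
static unsigned long get_pc(struct frame_record *x29)
{
	return x29->lr - 4;			/* AARCH64_INSN_SIZE */
}

/* mcount_get_lr: parent's pc, read through the instrumented fn's record */
static unsigned long get_lr(struct frame_record *x29)
{
	return x29->fp->lr - 4;
}

/* mcount_get_parent_fp: two dependent loads starting at [x29] */
static unsigned long get_parent_fp(struct frame_record *x29)
{
	return (unsigned long)x29->fp->fp;
}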
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 39ac630d83de..9ce04ba6bcb0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -60,6 +60,9 @@
push x0, x1
.if \el == 0
mrs x21, sp_el0
+ get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
+ ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+ disable_step_tsk x19, x20 // exceptions when scheduling.
.else
add x21, sp, #S_FRAME_SIZE
.endif
@@ -259,7 +262,7 @@ el1_da:
* Data abort handling
*/
mrs x0, far_el1
- enable_dbg_if_not_stepping x2
+ enable_dbg
// re-enable interrupts if they were enabled in the aborted context
tbnz x23, #7, 1f // PSR_I_BIT
enable_irq
@@ -275,13 +278,14 @@ el1_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x0, far_el1
- mov x1, x25
+ enable_dbg
mov x2, sp
b do_sp_pc_abort
el1_undef:
/*
* Undefined instruction
*/
+ enable_dbg
mov x0, sp
b do_undefinstr
el1_dbg:
@@ -294,10 +298,11 @@ el1_dbg:
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
-
+ enable_dbg
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
+ enable_dbg
mov x0, sp
mov x1, #BAD_SYNC
mrs x2, esr_el1
@@ -307,7 +312,7 @@ ENDPROC(el1_sync)
.align 6
el1_irq:
kernel_entry 1
- enable_dbg_if_not_stepping x0
+ enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -332,8 +337,7 @@ ENDPROC(el1_irq)
#ifdef CONFIG_PREEMPT
el1_preempt:
mov x24, lr
-1: enable_dbg
- bl preempt_schedule_irq // irq en/disable is done inside
+1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
@@ -349,7 +353,7 @@ el0_sync:
lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state
b.eq el0_svc
- adr lr, ret_from_exception
+ adr lr, ret_to_user
cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
b.eq el0_da
cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
@@ -378,7 +382,7 @@ el0_sync_compat:
lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state
b.eq el0_svc_compat
- adr lr, ret_from_exception
+ adr lr, ret_to_user
cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
b.eq el0_da
cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
@@ -423,11 +427,8 @@ el0_da:
*/
mrs x0, far_el1
bic x0, x0, #(0xff << 56)
- disable_step x1
- isb
- enable_dbg
// enable interrupts before calling the main handler
- enable_irq
+ enable_dbg_and_irq
mov x1, x25
mov x2, sp
b do_mem_abort
@@ -436,11 +437,8 @@ el0_ia:
* Instruction abort handling
*/
mrs x0, far_el1
- disable_step x1
- isb
- enable_dbg
// enable interrupts before calling the main handler
- enable_irq
+ enable_dbg_and_irq
orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts
mov x2, sp
b do_mem_abort
@@ -448,6 +446,7 @@ el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
*/
+ enable_dbg
mov x0, x25
mov x1, sp
b do_fpsimd_acc
@@ -455,6 +454,7 @@ el0_fpsimd_exc:
/*
* Floating Point or Advanced SIMD exception
*/
+ enable_dbg
mov x0, x25
mov x1, sp
b do_fpsimd_exc
@@ -463,11 +463,8 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x0, far_el1
- disable_step x1
- isb
- enable_dbg
// enable interrupts before calling the main handler
- enable_irq
+ enable_dbg_and_irq
mov x1, x25
mov x2, sp
b do_sp_pc_abort
@@ -475,9 +472,9 @@ el0_undef:
/*
* Undefined instruction
*/
- mov x0, sp
// enable interrupts before calling the main handler
- enable_irq
+ enable_dbg_and_irq
+ mov x0, sp
b do_undefinstr
el0_dbg:
/*
@@ -485,11 +482,13 @@ el0_dbg:
*/
tbnz x24, #0, el0_inv // EL0 only
mrs x0, far_el1
- disable_step x1
mov x1, x25
mov x2, sp
- b do_debug_exception
+ bl do_debug_exception
+ enable_dbg
+ b ret_to_user
el0_inv:
+ enable_dbg
mov x0, sp
mov x1, #BAD_SYNC
mrs x2, esr_el1
@@ -500,15 +499,12 @@ ENDPROC(el0_sync)
el0_irq:
kernel_entry 0
el0_irq_naked:
- disable_step x1
- isb
enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
irq_handler
- get_thread_info tsk
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
@@ -517,14 +513,6 @@ el0_irq_naked:
ENDPROC(el0_irq)
/*
- * This is the return code to user mode for abort handlers
- */
-ret_from_exception:
- get_thread_info tsk
- b ret_to_user
-ENDPROC(ret_from_exception)
-
-/*
* Register switch for AArch64. The callee-saved registers need to be saved
* and restored. On entry:
* x0 = previous task_struct (must be preserved across the switch)
@@ -563,10 +551,7 @@ ret_fast_syscall:
ldr x1, [tsk, #TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, fast_work_pending
- tbz x1, #TIF_SINGLESTEP, fast_exit
- disable_dbg
- enable_step x2
-fast_exit:
+ enable_step_tsk x1, x2
kernel_exit 0, ret = 1
/*
@@ -576,7 +561,7 @@ fast_work_pending:
str x0, [sp, #S_X0] // returned x0
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
- /* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
+ /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
tst x2, #PSR_MODE_MASK // user mode regs?
@@ -585,7 +570,6 @@ work_pending:
bl do_notify_resume
b ret_to_user
work_resched:
- enable_dbg
bl schedule
/*
@@ -596,9 +580,7 @@ ret_to_user:
ldr x1, [tsk, #TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
- tbz x1, #TIF_SINGLESTEP, no_work_pending
- disable_dbg
- enable_step x2
+ enable_step_tsk x1, x2
no_work_pending:
kernel_exit 0, ret = 0
ENDPROC(ret_to_user)
@@ -625,14 +607,11 @@ el0_svc:
mov sc_nr, #__NR_syscalls
el0_svc_naked: // compat entry point
stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
- disable_step x16
- isb
- enable_dbg
- enable_irq
+ enable_dbg_and_irq
- get_thread_info tsk
- ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing
- tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+ ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+ tst x16, #_TIF_SYSCALL_WORK
+ b.ne __sys_trace
adr lr, ret_fast_syscall // return address
cmp scno, sc_nr // check upper syscall limit
b.hs ni_sys
@@ -648,9 +627,8 @@ ENDPROC(el0_svc)
* switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov x1, sp
- mov w0, #0 // trace entry
- bl syscall_trace
+ mov x0, sp
+ bl syscall_trace_enter
adr lr, __sys_trace_return // return address
uxtw scno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs
@@ -665,9 +643,8 @@ __sys_trace:
__sys_trace_return:
str x0, [sp] // save returned x0
- mov x1, sp
- mov w0, #1 // trace exit
- bl syscall_trace
+ mov x0, sp
+ bl syscall_trace_exit
b ret_to_user
/*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 4aef42a04bdc..ad8aebb1cdef 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -35,6 +35,60 @@
#define FPEXC_IDF (1 << 7)
/*
+ * In order to reduce the number of times the FPSIMD state is needlessly saved
+ * and restored, we need to keep track of two things:
+ * (a) for each task, we need to remember which CPU was the last one to have
+ * the task's FPSIMD state loaded into its FPSIMD registers;
+ * (b) for each CPU, we need to remember which task's userland FPSIMD state has
+ * been loaded into its FPSIMD registers most recently, or whether it has
+ * been used to perform kernel mode NEON in the meantime.
+ *
+ * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to
+ * the id of the current CPU every time the state is loaded onto a CPU. For (b),
+ * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
+ * address of the userland FPSIMD state of the task that was most recently
+ * loaded onto the CPU, or NULL if kernel mode NEON has been performed since.
+ *
+ * With this in place, we no longer have to restore the next FPSIMD state right
+ * when switching between tasks. Instead, we can defer this check to userland
+ * resume, at which time we verify whether the CPU's fpsimd_last_state and the
+ * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
+ * can omit the FPSIMD restore.
+ *
+ * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
+ * indicate whether or not the userland FPSIMD state of the current task is
+ * present in the registers. The flag is set unless the FPSIMD registers of this
+ * CPU currently contain the most recent userland FPSIMD state of the current
+ * task.
+ *
+ * For a certain task, the sequence may look something like this:
+ * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
+ * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
+ * variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
+ * cleared, otherwise it is set;
+ *
+ * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
+ * userland FPSIMD state is copied from memory to the registers, the task's
+ * fpsimd_state.cpu field is set to the id of the current CPU, the current
+ * CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
+ * TIF_FOREIGN_FPSTATE flag is cleared;
+ *
+ * - the task executes an ordinary syscall; upon return to userland, the
+ * TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
+ * restored;
+ *
+ * - the task executes a syscall which executes some NEON instructions; this is
+ * preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
+ * register contents to memory, clears the fpsimd_last_state per-cpu variable
+ * and sets the TIF_FOREIGN_FPSTATE flag;
+ *
+ * - the task gets preempted after kernel_neon_end() is called; as we have not
+ * returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
+ * whatever is in the FPSIMD registers is not saved to memory, but discarded.
+ */
+static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+
+/*
* Trapped FP/ASIMD access.
*/
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
@@ -72,43 +126,137 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
void fpsimd_thread_switch(struct task_struct *next)
{
- /* check if not kernel threads */
- if (current->mm)
+ /*
+ * Save the current FPSIMD state to memory, but only if whatever is in
+ * the registers is in fact the most recent userland FPSIMD state of
+ * 'current'.
+ */
+ if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(&current->thread.fpsimd_state);
- if (next->mm)
- fpsimd_load_state(&next->thread.fpsimd_state);
+
+ if (next->mm) {
+ /*
+ * If we are switching to a task whose most recent userland
+ * FPSIMD state is already in the registers of *this* cpu,
+ * we can skip loading the state from memory. Otherwise, set
+ * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
+ * upon the next return to userland.
+ */
+ struct fpsimd_state *st = &next->thread.fpsimd_state;
+
+ if (__this_cpu_read(fpsimd_last_state) == st
+ && st->cpu == smp_processor_id())
+ clear_ti_thread_flag(task_thread_info(next),
+ TIF_FOREIGN_FPSTATE);
+ else
+ set_ti_thread_flag(task_thread_info(next),
+ TIF_FOREIGN_FPSTATE);
+ }
}
void fpsimd_flush_thread(void)
{
- preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
- fpsimd_load_state(&current->thread.fpsimd_state);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+}
+
+/*
+ * Save the userland FPSIMD state of 'current' to memory, but only if the state
+ * currently held in the registers does in fact belong to 'current'
+ */
+void fpsimd_preserve_current_state(void)
+{
+ preempt_disable();
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ preempt_enable();
+}
+
+/*
+ * Load the userland FPSIMD state of 'current' from memory, but only if the
+ * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
+ * state of 'current'
+ */
+void fpsimd_restore_current_state(void)
+{
+ preempt_disable();
+ if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+ fpsimd_load_state(st);
+ this_cpu_write(fpsimd_last_state, st);
+ st->cpu = smp_processor_id();
+ }
+ preempt_enable();
+}
+
+/*
+ * Load an updated userland FPSIMD state for 'current' from memory and set the
+ * flag that indicates that the FPSIMD register contents are the most recent
+ * FPSIMD state of 'current'
+ */
+void fpsimd_update_current_state(struct fpsimd_state *state)
+{
+ preempt_disable();
+ fpsimd_load_state(state);
+ if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+ this_cpu_write(fpsimd_last_state, st);
+ st->cpu = smp_processor_id();
+ }
preempt_enable();
}
+/*
+ * Invalidate live CPU copies of task t's FPSIMD state
+ */
+void fpsimd_flush_task_state(struct task_struct *t)
+{
+ t->thread.fpsimd_state.cpu = NR_CPUS;
+}
+
#ifdef CONFIG_KERNEL_MODE_NEON
+static DEFINE_PER_CPU(struct fpsimd_partial_state, hardirq_fpsimdstate);
+static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
+
/*
* Kernel-side NEON support functions
*/
-void kernel_neon_begin(void)
+void kernel_neon_begin_partial(u32 num_regs)
{
- /* Avoid using the NEON in interrupt context */
- BUG_ON(in_interrupt());
- preempt_disable();
+ if (in_interrupt()) {
+ struct fpsimd_partial_state *s = this_cpu_ptr(
+ in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
- if (current->mm)
- fpsimd_save_state(&current->thread.fpsimd_state);
+ BUG_ON(num_regs > 32);
+ fpsimd_save_partial_state(s, roundup(num_regs, 2));
+ } else {
+ /*
+ * Save the userland FPSIMD state if we have one and if we
+ * haven't done so already. Clear fpsimd_last_state to indicate
+ * that there is no longer userland FPSIMD state in the
+ * registers.
+ */
+ preempt_disable();
+ if (current->mm &&
+ !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ this_cpu_write(fpsimd_last_state, NULL);
+ }
}
-EXPORT_SYMBOL(kernel_neon_begin);
+EXPORT_SYMBOL(kernel_neon_begin_partial);
void kernel_neon_end(void)
{
- if (current->mm)
- fpsimd_load_state(&current->thread.fpsimd_state);
-
- preempt_enable();
+ if (in_interrupt()) {
+ struct fpsimd_partial_state *s = this_cpu_ptr(
+ in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
+ fpsimd_load_partial_state(s);
+ } else {
+ preempt_enable();
+ }
}
EXPORT_SYMBOL(kernel_neon_end);
@@ -120,12 +268,12 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
{
switch (cmd) {
case CPU_PM_ENTER:
- if (current->mm)
+ if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(&current->thread.fpsimd_state);
break;
case CPU_PM_EXIT:
if (current->mm)
- fpsimd_load_state(&current->thread.fpsimd_state);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
break;
case CPU_PM_ENTER_FAILED:
default:
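
The new kernel_neon_begin_partial() interface also lifts the old BUG_ON() that
forbade NEON in interrupt context, by spilling to the per-cpu hardirq/softirq
areas above. A usage sketch (demo_neon_fill is hypothetical; it assumes a
caller that may run in softirq context and clobbers at most v0-v3):

#include <linux/kernel.h>
#include <asm/neon.h>

static void demo_neon_fill(u8 *dst, size_t len)
{
	kernel_neon_begin_partial(4);	/* save/restore only v0-v3 */
	/* ... NEON instructions touching at most v0-v3 ... */
	kernel_neon_end();
}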
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
new file mode 100644
index 000000000000..7924d73b6476
--- /dev/null
+++ b/arch/arm64/kernel/ftrace.c
@@ -0,0 +1,176 @@
+/*
+ * arch/arm64/kernel/ftrace.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/swab.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+#include <asm/insn.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Replace a single instruction, which may be a branch or NOP.
+ * If @validate == true, a replaced instruction is checked against 'old'.
+ */
+static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
+ bool validate)
+{
+ u32 replaced;
+
+ /*
+ * Note:
+ * Due to modules and __init, code can disappear and change;
+ * we need to protect against faulting as well as code changing.
+ * We do this by using the aarch64_insn_*() helpers, which in turn
+ * use probe_kernel_*().
+ *
+ * No lock is held here because all the modifications are run
+ * through stop_machine().
+ */
+ if (validate) {
+ if (aarch64_insn_read((void *)pc, &replaced))
+ return -EFAULT;
+
+ if (replaced != old)
+ return -EINVAL;
+ }
+ if (aarch64_insn_patch_text_nosync((void *)pc, new))
+ return -EPERM;
+
+ return 0;
+}
+
+/*
+ * Replace tracer function in ftrace_caller()
+ */
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long pc;
+ u32 new;
+
+ pc = (unsigned long)&ftrace_call;
+ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+
+ return ftrace_modify_code(pc, 0, new, false);
+}
+
+/*
+ * Turn on the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ old = aarch64_insn_gen_nop();
+ new = aarch64_insn_gen_branch_imm(pc, addr, true);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+/*
+ * Turn off the call to ftrace_caller() in instrumented function
+ */
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ u32 old, new;
+
+ old = aarch64_insn_gen_branch_imm(pc, addr, true);
+ new = aarch64_insn_gen_nop();
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * The function_graph tracer expects ftrace_return_to_handler() to be called
+ * on the way back to the parent. For this purpose, this function is called
+ * from _mcount() or ftrace_caller() to replace the return address (*parent)
+ * on the call stack with return_to_handler.
+ *
+ * Note that @frame_pointer is used only for sanity check later.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long old;
+ struct ftrace_graph_ent trace;
+ int err;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Note:
+ * No protection against faulting at *parent, which may be seen
+ * on other archs. It's unlikely on AArch64.
+ */
+ old = *parent;
+ *parent = return_hooker;
+
+ trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ *parent = old;
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer);
+ if (err == -EBUSY) {
+ *parent = old;
+ return;
+ }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
+ * depending on @enable.
+ */
+static int ftrace_modify_graph_caller(bool enable)
+{
+ unsigned long pc = (unsigned long)&ftrace_graph_call;
+ u32 branch, nop;
+
+ branch = aarch64_insn_gen_branch_imm(pc,
+ (unsigned long)ftrace_graph_caller, false);
+ nop = aarch64_insn_gen_nop();
+
+ if (enable)
+ return ftrace_modify_code(pc, nop, branch, true);
+ else
+ return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
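
All three patching entry points above reduce to swapping a single instruction
between a NOP and a branch-and-link. A condensed sketch of that life cycle
(sketch_toggle_site is a name invented here; validation and error handling are
elided):

#include <linux/types.h>
#include <asm/insn.h>

static void sketch_toggle_site(unsigned long pc, unsigned long tracer, bool on)
{
	u32 nop = aarch64_insn_gen_nop();			 /* 0xd503201f */
	u32 bl  = aarch64_insn_gen_branch_imm(pc, tracer, true); /* BL */

	/*
	 * ftrace_modify_code() first validates the instruction currently
	 * at pc, then patches it; all callers run under stop_machine().
	 */
	aarch64_insn_patch_text_nosync((void *)pc, on ? bl : nop);
}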
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0fd565000772..a2c1195abb7f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -108,8 +108,18 @@
/*
* DO NOT MODIFY. Image header expected by Linux boot-loaders.
*/
+#ifdef CONFIG_EFI
+efi_head:
+ /*
+ * This add instruction has no meaningful effect except that
+ * its opcode forms the magic "MZ" signature required by UEFI.
+ */
+ add x13, x18, #0x16
+ b stext
+#else
b stext // branch to kernel start, magic
.long 0 // reserved
+#endif
.quad TEXT_OFFSET // Image load offset from start of RAM
.quad 0 // reserved
.quad 0 // reserved
@@ -120,7 +130,109 @@
.byte 0x52
.byte 0x4d
.byte 0x64
+#ifdef CONFIG_EFI
+ .long pe_header - efi_head // Offset to the PE header.
+#else
.word 0 // reserved
+#endif
+
+#ifdef CONFIG_EFI
+ .align 3
+pe_header:
+ .ascii "PE"
+ .short 0
+coff_header:
+ .short 0xaa64 // AArch64
+ .short 2 // nr_sections
+ .long 0 // TimeDateStamp
+ .long 0 // PointerToSymbolTable
+ .long 1 // NumberOfSymbols
+ .short section_table - optional_header // SizeOfOptionalHeader
+ .short 0x206 // Characteristics.
+ // IMAGE_FILE_DEBUG_STRIPPED |
+ // IMAGE_FILE_EXECUTABLE_IMAGE |
+ // IMAGE_FILE_LINE_NUMS_STRIPPED
+optional_header:
+ .short 0x20b // PE32+ format
+ .byte 0x02 // MajorLinkerVersion
+ .byte 0x14 // MinorLinkerVersion
+ .long _edata - stext // SizeOfCode
+ .long 0 // SizeOfInitializedData
+ .long 0 // SizeOfUninitializedData
+ .long efi_stub_entry - efi_head // AddressOfEntryPoint
+ .long stext - efi_head // BaseOfCode
+
+extra_header_fields:
+ .quad 0 // ImageBase
+ .long 0x20 // SectionAlignment
+ .long 0x8 // FileAlignment
+ .short 0 // MajorOperatingSystemVersion
+ .short 0 // MinorOperatingSystemVersion
+ .short 0 // MajorImageVersion
+ .short 0 // MinorImageVersion
+ .short 0 // MajorSubsystemVersion
+ .short 0 // MinorSubsystemVersion
+ .long 0 // Win32VersionValue
+
+ .long _edata - efi_head // SizeOfImage
+
+ // Everything before the kernel image is considered part of the header
+ .long stext - efi_head // SizeOfHeaders
+ .long 0 // CheckSum
+ .short 0xa // Subsystem (EFI application)
+ .short 0 // DllCharacteristics
+ .quad 0 // SizeOfStackReserve
+ .quad 0 // SizeOfStackCommit
+ .quad 0 // SizeOfHeapReserve
+ .quad 0 // SizeOfHeapCommit
+ .long 0 // LoaderFlags
+ .long 0x6 // NumberOfRvaAndSizes
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+ .quad 0 // CertificationTable
+ .quad 0 // BaseRelocationTable
+
+ // Section table
+section_table:
+
+ /*
+ * The EFI application loader requires a relocation section
+ * because EFI applications must be relocatable. This is a
+ * dummy section as far as we are concerned.
+ */
+ .ascii ".reloc"
+ .byte 0
+ .byte 0 // end of 0 padding of section name
+ .long 0
+ .long 0
+ .long 0 // SizeOfRawData
+ .long 0 // PointerToRawData
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long 0x42100040 // Characteristics (section flags)
+
+
+ .ascii ".text"
+ .byte 0
+ .byte 0
+ .byte 0 // end of 0 padding of section name
+ .long _edata - stext // VirtualSize
+ .long stext - efi_head // VirtualAddress
+ .long _edata - stext // SizeOfRawData
+ .long stext - efi_head // PointerToRawData
+
+ .long 0 // PointerToRelocations (0 for executables)
+ .long 0 // PointerToLineNumbers (0 for executables)
+ .short 0 // NumberOfRelocations (0 for executables)
+ .short 0 // NumberOfLineNumbers (0 for executables)
+ .long 0xe0500020 // Characteristics (section flags)
+ .align 5
+#endif
ENTRY(stext)
mov x21, x0 // x21=FDT
@@ -158,8 +270,7 @@ ENDPROC(stext)
*/
ENTRY(el2_setup)
mrs x0, CurrentEL
- cmp x0, #PSR_MODE_EL2t
- ccmp x0, #PSR_MODE_EL2h, #0x4, ne
+ cmp x0, #CurrentEL_EL2
b.ne 1f
mrs x0, sctlr_el2
CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
@@ -230,11 +341,9 @@ ENTRY(set_cpu_boot_mode_flag)
cmp w20, #BOOT_CPU_MODE_EL2
b.ne 1f
add x1, x1, #4
-1: dc cvac, x1 // Clean potentially dirty cache line
- dsb sy
- str w20, [x1] // This CPU has booted in EL1
- dc civac, x1 // Clean&invalidate potentially stale cache line
- dsb sy
+1: str w20, [x1] // This CPU has booted in EL1
+ dmb sy
+ dc ivac, x1 // Invalidate potentially stale cache line
ret
ENDPROC(set_cpu_boot_mode_flag)
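
The "MZ" trick in the header above can be checked by hand: "add x13, x18,
#0x16" encodes to 0x91005a4d, and on a little-endian machine that word is
stored as the bytes 4d 5a 00 91 -- the first two being 'M' and 'Z', the
DOS/PE signature UEFI firmware looks for. A standalone check (plain userspace
C, for illustration only):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint32_t insn = 0x91005a4d;	/* add x13, x18, #0x16 */
	unsigned char b[4];

	memcpy(b, &insn, sizeof(insn));	/* little-endian byte order assumed */
	printf("%c%c\n", b[0], b[1]);	/* prints "MZ" */
	return 0;
}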
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index bee789757806..df1cf15377b4 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -20,6 +20,7 @@
#define pr_fmt(fmt) "hw-breakpoint: " fmt
+#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
@@ -27,7 +28,6 @@
#include <linux/ptrace.h>
#include <linux/smp.h>
-#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6391485f342d..43b7c34f92cb 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -20,6 +20,7 @@
#include <stdarg.h>
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -113,32 +114,62 @@ void arch_cpu_idle_dead(void)
}
#endif
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
void machine_shutdown(void)
{
-#ifdef CONFIG_SMP
- smp_send_stop();
-#endif
+ disable_nonboot_cpus();
}
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
void machine_halt(void)
{
- machine_shutdown();
+ local_irq_disable();
+ smp_send_stop();
while (1);
}
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
void machine_power_off(void)
{
- machine_shutdown();
+ local_irq_disable();
+ smp_send_stop();
if (pm_power_off)
pm_power_off();
}
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
void machine_restart(char *cmd)
{
- machine_shutdown();
-
/* Disable interrupts first */
local_irq_disable();
+ smp_send_stop();
/* Now call the architecture specific reboot code. */
if (arm_pm_restart)
@@ -205,7 +236,7 @@ void release_thread(struct task_struct *dead_task)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- fpsimd_save_state(&current->thread.fpsimd_state);
+ fpsimd_preserve_current_state();
*dst = *src;
return 0;
}
@@ -300,7 +331,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* Complete any pending TLB or cache maintenance on this CPU in case
* the thread migrates to a different CPU.
*/
- dsb();
+ dsb(ish);
/* the actual thread switch */
last = cpu_switch_to(prev, next);
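
The bare dsb() in __switch_to becomes dsb(ish): the context-switch path
only needs prior TLB/cache maintenance to be complete as observed by other
CPUs (the Inner Shareable domain), not by the whole system. For reference,
the barrier macro is roughly (assumed to match asm/barrier.h of this era):

    #define dsb(opt)    asm volatile("dsb " #opt : : : "memory")
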
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index ea4828a4aa96..9e9798f91172 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -18,12 +18,17 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+#include <linux/delay.h>
+#include <uapi/linux/psci.h>
#include <asm/compiler.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
#include <asm/smp_plat.h>
+#include <asm/system_misc.h>
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
@@ -40,58 +45,52 @@ struct psci_operations {
int (*cpu_off)(struct psci_power_state state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int (*migrate)(unsigned long cpuid);
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
};
static struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u64, u64, u64, u64);
+typedef int (*psci_initcall_t)(const struct device_node *);
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
+ PSCI_FN_AFFINITY_INFO,
+ PSCI_FN_MIGRATE_INFO_TYPE,
PSCI_FN_MAX,
};
static u32 psci_function_id[PSCI_FN_MAX];
-#define PSCI_RET_SUCCESS 0
-#define PSCI_RET_EOPNOTSUPP -1
-#define PSCI_RET_EINVAL -2
-#define PSCI_RET_EPERM -3
-
static int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
return 0;
- case PSCI_RET_EOPNOTSUPP:
+ case PSCI_RET_NOT_SUPPORTED:
return -EOPNOTSUPP;
- case PSCI_RET_EINVAL:
+ case PSCI_RET_INVALID_PARAMS:
return -EINVAL;
- case PSCI_RET_EPERM:
+ case PSCI_RET_DENIED:
return -EPERM;
};
return -EINVAL;
}
-#define PSCI_POWER_STATE_ID_MASK 0xffff
-#define PSCI_POWER_STATE_ID_SHIFT 0
-#define PSCI_POWER_STATE_TYPE_MASK 0x1
-#define PSCI_POWER_STATE_TYPE_SHIFT 16
-#define PSCI_POWER_STATE_AFFL_MASK 0x3
-#define PSCI_POWER_STATE_AFFL_SHIFT 24
-
static u32 psci_power_state_pack(struct psci_power_state state)
{
- return ((state.id & PSCI_POWER_STATE_ID_MASK)
- << PSCI_POWER_STATE_ID_SHIFT) |
- ((state.type & PSCI_POWER_STATE_TYPE_MASK)
- << PSCI_POWER_STATE_TYPE_SHIFT) |
- ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
- << PSCI_POWER_STATE_AFFL_SHIFT);
+ return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
+ & PSCI_0_2_POWER_STATE_ID_MASK) |
+ ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+ & PSCI_0_2_POWER_STATE_TYPE_MASK) |
+ ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+ & PSCI_0_2_POWER_STATE_AFFL_MASK);
}
/*
@@ -128,6 +127,14 @@ static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
return function_id;
}
+static int psci_get_version(void)
+{
+ int err;
+
+ err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+ return err;
+}
+
static int psci_cpu_suspend(struct psci_power_state state,
unsigned long entry_point)
{
@@ -171,26 +178,36 @@ static int psci_migrate(unsigned long cpuid)
return psci_to_linux_errno(err);
}
-static const struct of_device_id psci_of_match[] __initconst = {
- { .compatible = "arm,psci", },
- {},
-};
+static int psci_affinity_info(unsigned long target_affinity,
+ unsigned long lowest_affinity_level)
+{
+ int err;
+ u32 fn;
+
+ fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
+ err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
+ return err;
+}
-void __init psci_init(void)
+static int psci_migrate_info_type(void)
{
- struct device_node *np;
- const char *method;
- u32 id;
+ int err;
+ u32 fn;
- np = of_find_matching_node(NULL, psci_of_match);
- if (!np)
- return;
+ fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
+ err = invoke_psci_fn(fn, 0, 0, 0);
+ return err;
+}
- pr_info("probing function IDs from device-tree\n");
+static int get_set_conduit_method(struct device_node *np)
+{
+ const char *method;
+
+ pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
- pr_warning("missing \"method\" property\n");
- goto out_put_node;
+ pr_warn("missing \"method\" property\n");
+ return -ENXIO;
}
if (!strcmp("hvc", method)) {
@@ -198,10 +215,99 @@ void __init psci_init(void)
} else if (!strcmp("smc", method)) {
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
- pr_warning("invalid \"method\" property: %s\n", method);
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * PSCI Function IDs for v0.2+ are well defined so use
+ * standard values.
+ */
+static int psci_0_2_init(struct device_node *np)
+{
+ int err, ver;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+
+ ver = psci_get_version();
+
+ if (ver == PSCI_RET_NOT_SUPPORTED) {
+ /* PSCI v0.2 mandates implementation of PSCI_VERSION. */
+ pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
+ err = -EOPNOTSUPP;
goto out_put_node;
+ } else {
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 &&
+ PSCI_VERSION_MINOR(ver) < 2) {
+ err = -EINVAL;
+ pr_err("Conflicting PSCI version detected.\n");
+ goto out_put_node;
+ }
}
+ pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+
+ psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+ psci_ops.cpu_off = psci_cpu_off;
+
+ psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
+ psci_ops.cpu_on = psci_cpu_on;
+
+ psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
+ psci_ops.migrate = psci_migrate;
+
+ psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
+ psci_ops.affinity_info = psci_affinity_info;
+
+ psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
+ PSCI_0_2_FN_MIGRATE_INFO_TYPE;
+ psci_ops.migrate_info_type = psci_migrate_info_type;
+
+ arm_pm_restart = psci_sys_reset;
+
+ pm_power_off = psci_sys_poweroff;
+
+out_put_node:
+ of_node_put(np);
+ return err;
+}
+
+/*
+ * PSCI < v0.2 gets its Function IDs via DT.
+ */
+static int psci_0_1_init(struct device_node *np)
+{
+ u32 id;
+ int err;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+
+ pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
psci_ops.cpu_suspend = psci_cpu_suspend;
@@ -224,7 +330,28 @@ void __init psci_init(void)
out_put_node:
of_node_put(np);
- return;
+ return err;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci", .data = psci_0_1_init},
+ { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
+ {},
+};
+
+int __init psci_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *matched_np;
+ psci_initcall_t init_fn;
+
+ np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+ if (!np)
+ return -ENODEV;
+
+ init_fn = (psci_initcall_t)matched_np->data;
+ return init_fn(np);
}
#ifdef CONFIG_SMP
@@ -277,6 +404,35 @@ static void cpu_psci_cpu_die(unsigned int cpu)
pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
}
+
+static int cpu_psci_cpu_kill(unsigned int cpu)
+{
+ int err, i;
+
+ if (!psci_ops.affinity_info)
+ return 1;
+ /*
+ * cpu_kill could race with cpu_die and we can
+ * potentially end up declaring this cpu undead
+ * while it is dying. So, try again a few times.
+ */
+
+ for (i = 0; i < 10; i++) {
+ err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
+ if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
+ pr_info("CPU%d killed.\n", cpu);
+ return 1;
+ }
+
+ msleep(10);
+ pr_info("Retrying again to check for CPU kill\n");
+ }
+
+ pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
+ cpu, err);
+ /* Make op_cpu_kill() fail. */
+ return 0;
+}
#endif
const struct cpu_operations cpu_psci_ops = {
@@ -287,6 +443,7 @@ const struct cpu_operations cpu_psci_ops = {
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = cpu_psci_cpu_disable,
.cpu_die = cpu_psci_cpu_die,
+ .cpu_kill = cpu_psci_cpu_kill,
#endif
};
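
For reference, the version word returned by PSCI_0_2_FN_PSCI_VERSION packs
the major number in the upper half and the minor in the lower, so the uapi
macros used in psci_0_2_init() reduce to:

    #define PSCI_VERSION_MAJOR(ver)     (((ver) >> 16) & 0xffff)
    #define PSCI_VERSION_MINOR(ver)     ((ver) & 0xffff)
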
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6a8928bba03c..9fde010c945f 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -19,6 +19,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -41,6 +42,9 @@
#include <asm/traps.h>
#include <asm/system_misc.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/*
* TODO: does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
@@ -517,6 +521,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
return ret;
target->thread.fpsimd_state.user_fpsimd = newstate;
+ fpsimd_flush_task_state(target);
return ret;
}
@@ -650,11 +655,16 @@ static int compat_gpr_get(struct task_struct *target,
reg = task_pt_regs(target)->regs[idx];
}
- ret = copy_to_user(ubuf, &reg, sizeof(reg));
- if (ret)
- break;
+ if (kbuf) {
+ memcpy(kbuf, &reg, sizeof(reg));
+ kbuf += sizeof(reg);
+ } else {
+ ret = copy_to_user(ubuf, &reg, sizeof(reg));
+ if (ret)
+ break;
- ubuf += sizeof(reg);
+ ubuf += sizeof(reg);
+ }
}
return ret;
@@ -684,11 +694,16 @@ static int compat_gpr_set(struct task_struct *target,
unsigned int idx = start + i;
compat_ulong_t reg;
- ret = copy_from_user(&reg, ubuf, sizeof(reg));
- if (ret)
- return ret;
+ if (kbuf) {
+ memcpy(&reg, kbuf, sizeof(reg));
+ kbuf += sizeof(reg);
+ } else {
+ ret = copy_from_user(&reg, ubuf, sizeof(reg));
+ if (ret)
+ return ret;
- ubuf += sizeof(reg);
+ ubuf += sizeof(reg);
+ }
switch (idx) {
case 15:
@@ -764,6 +779,7 @@ static int compat_vfp_set(struct task_struct *target,
uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
}
+ fpsimd_flush_task_state(target);
return ret;
}
@@ -821,6 +837,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t val)
{
int ret;
+ mm_segment_t old_fs = get_fs();
if (off & 3 || off >= COMPAT_USER_SZ)
return -EIO;
@@ -828,10 +845,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
if (off >= sizeof(compat_elf_gregset_t))
return 0;
+ set_fs(KERNEL_DS);
ret = copy_regset_from_user(tsk, &user_aarch32_view,
REGSET_COMPAT_GPR, off,
sizeof(compat_ulong_t),
&val);
+ set_fs(old_fs);
+
return ret;
}
@@ -1058,35 +1078,49 @@ long arch_ptrace(struct task_struct *child, long request,
return ptrace_request(child, request, addr, data);
}
-asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
+enum ptrace_syscall_dir {
+ PTRACE_SYSCALL_ENTER = 0,
+ PTRACE_SYSCALL_EXIT,
+};
+
+static void tracehook_report_syscall(struct pt_regs *regs,
+ enum ptrace_syscall_dir dir)
{
+ int regno;
unsigned long saved_reg;
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return regs->syscallno;
-
- if (is_compat_task()) {
- /* AArch32 uses ip (r12) for scratch */
- saved_reg = regs->regs[12];
- regs->regs[12] = dir;
- } else {
- /*
- * Save X7. X7 is used to denote syscall entry/exit:
- * X7 = 0 -> entry, = 1 -> exit
- */
- saved_reg = regs->regs[7];
- regs->regs[7] = dir;
- }
+ /*
+ * A scratch register (ip (r12) on AArch32, x7 on AArch64) is
+ * used to denote syscall entry/exit: 0 on entry, 1 on exit.
+ */
+ regno = (is_compat_task() ? 12 : 7);
+ saved_reg = regs->regs[regno];
+ regs->regs[regno] = dir;
- if (dir)
+ if (dir == PTRACE_SYSCALL_EXIT)
tracehook_report_syscall_exit(regs, 0);
else if (tracehook_report_syscall_entry(regs))
regs->syscallno = ~0UL;
- if (is_compat_task())
- regs->regs[12] = saved_reg;
- else
- regs->regs[7] = saved_reg;
+ regs->regs[regno] = saved_reg;
+}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, regs->syscallno);
return regs->syscallno;
}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs_return_value(regs));
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+}
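
The preserved x7/r12 convention lets a PTRACE_SYSCALL-based tracer tell
entry stops from exit stops. A hedged userspace sketch (not part of this
patch):

    #include <sys/ptrace.h>
    #include <sys/uio.h>
    #include <linux/elf.h>      /* NT_PRSTATUS */
    #include <asm/ptrace.h>     /* struct user_pt_regs */

    static int syscall_stop_is_exit(pid_t pid)
    {
            struct user_pt_regs regs;
            struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

            if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov))
                    return -1;
            return regs.regs[7];  /* 0 = entry, 1 = exit (native task) */
    }
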
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
new file mode 100644
index 000000000000..89102a6ffad5
--- /dev/null
+++ b/arch/arm64/kernel/return_address.c
@@ -0,0 +1,55 @@
+/*
+ * arch/arm64/kernel/return_address.c
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/ftrace.h>
+
+#include <asm/stacktrace.h>
+
+struct return_address_data {
+ unsigned int level;
+ void *addr;
+};
+
+static int save_return_addr(struct stackframe *frame, void *d)
+{
+ struct return_address_data *data = d;
+
+ if (!data->level) {
+ data->addr = (void *)frame->pc;
+ return 1;
+ } else {
+ --data->level;
+ return 0;
+ }
+}
+
+void *return_address(unsigned int level)
+{
+ struct return_address_data data;
+ struct stackframe frame;
+ register unsigned long current_sp asm ("sp");
+
+ data.level = level + 2;
+ data.addr = NULL;
+
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.pc = (unsigned long)return_address; /* dummy */
+
+ walk_stackframe(&frame, save_return_addr, &data);
+
+ if (!data.level)
+ return data.addr;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(return_address);
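
Setting data.level to level + 2 skips the two innermost frames of the walk
(return_address() itself and its immediate caller), so return_address(0)
behaves like __builtin_return_address(0) in the calling function; this is
what ftrace's CALLER_ADDRn maps to on arm64. A short usage sketch:

    /* in some instrumented kernel function: */
    pr_debug("called from %pS (whose caller was %pS)\n",
             return_address(0), return_address(1));
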
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 7ec784653b29..46d1125571f6 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -25,6 +25,7 @@
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
+#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
@@ -41,6 +42,7 @@
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+#include <linux/efi.h>
#include <asm/fixmap.h>
#include <asm/cputype.h>
@@ -55,6 +57,7 @@
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>
+#include <asm/efi.h>
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
@@ -198,6 +201,8 @@ static void __init setup_processor(void)
{
struct cpu_info *cpu_info;
u64 features, block;
+ u32 cwg;
+ int cls;
cpu_info = lookup_processor_type(read_cpuid_id());
if (!cpu_info) {
@@ -215,6 +220,18 @@ static void __init setup_processor(void)
elf_hwcap = 0;
/*
+ * Check for sane CTR_EL0.CWG value.
+ */
+ cwg = cache_type_cwg();
+ cls = cache_line_size();
+ if (!cwg)
+ pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
+ cls);
+ if (L1_CACHE_BYTES < cls)
+ pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
+ L1_CACHE_BYTES, cls);
+
+ /*
* ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
* The blocks we test below represent incremental functionality
* for non-negative values. Negative values are reserved.
@@ -361,16 +378,18 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
- init_mem_pgprot();
early_ioremap_init();
parse_early_param();
+ efi_init();
arm64_memblock_init();
paging_init();
request_standard_resources();
+ efi_idmap_init();
+
unflatten_device_tree();
psci_init();
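
On the CTR_EL0.CWG check above: CWG (bits [27:24]) encodes the maximum
cache write-back granule as log2 of the number of 4-byte words. The helpers
used there plausibly reduce to the following (a sketch; the real
definitions live under arch/arm64/include/asm/):

    static inline u32 cache_type_cwg(void)
    {
            return (read_cpuid_cachetype() >> 24) & 0xf; /* CTR_EL0.CWG */
    }

    static inline int cache_line_size(void)
    {
            u32 cwg = cache_type_cwg();
            return cwg ? 4 << cwg : L1_CACHE_BYTES; /* fall back if unset */
    }
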
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 890a591f75dd..6357b9c6c90e 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/personality.h>
@@ -25,7 +26,6 @@
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
-#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -51,7 +51,7 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
int err;
/* dump the hardware registers to the fpsimd_state structure */
- fpsimd_save_state(fpsimd);
+ fpsimd_preserve_current_state();
/* copy the FP and status/control registers */
err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
@@ -86,11 +86,8 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
/* load the hardware registers from the fpsimd_state structure */
- if (!err) {
- preempt_disable();
- fpsimd_load_state(&fpsimd);
- preempt_enable();
- }
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
return err ? -EFAULT : 0;
}
@@ -100,8 +97,7 @@ static int restore_sigframe(struct pt_regs *regs,
{
sigset_t set;
int i, err;
- struct aux_context __user *aux =
- (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+ void *aux = sf->uc.uc_mcontext.__reserved;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0)
@@ -121,8 +117,11 @@ static int restore_sigframe(struct pt_regs *regs,
err |= !valid_user_regs(&regs->user_regs);
- if (err == 0)
- err |= restore_fpsimd_context(&aux->fpsimd);
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+ container_of(aux, struct fpsimd_context, head);
+ err |= restore_fpsimd_context(fpsimd_ctx);
+ }
return err;
}
@@ -167,8 +166,8 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
struct pt_regs *regs, sigset_t *set)
{
int i, err = 0;
- struct aux_context __user *aux =
- (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+ void *aux = sf->uc.uc_mcontext.__reserved;
+ struct _aarch64_ctx *end;
/* set up the stack frame for unwinding */
__put_user_error(regs->regs[29], &sf->fp, err);
@@ -185,12 +184,27 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
- if (err == 0)
- err |= preserve_fpsimd_context(&aux->fpsimd);
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+ container_of(aux, struct fpsimd_context, head);
+ err |= preserve_fpsimd_context(fpsimd_ctx);
+ aux += sizeof(*fpsimd_ctx);
+ }
+
+ /* fault information, if valid */
+ if (current->thread.fault_code) {
+ struct esr_context *esr_ctx =
+ container_of(aux, struct esr_context, head);
+ __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
+ __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
+ __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
+ aux += sizeof(*esr_ctx);
+ }
/* set the "end" magic */
- __put_user_error(0, &aux->end.magic, err);
- __put_user_error(0, &aux->end.size, err);
+ end = aux;
+ __put_user_error(0, &end->magic, err);
+ __put_user_error(0, &end->size, err);
return err;
}
@@ -416,4 +430,8 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
+
+ if (thread_flags & _TIF_FOREIGN_FPSTATE)
+ fpsimd_restore_current_state();
+
}
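
With this change the __reserved area carries a chain of _aarch64_ctx
records: fpsimd_context, then esr_context when fault information is valid,
then a zero-magic terminator. A hedged sketch of how a SA_SIGINFO handler
can walk the chain (uc being the handler's ucontext_t * third argument):

    struct _aarch64_ctx *head =
            (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;

    while (head->magic) {
            if (head->magic == ESR_MAGIC) {
                    struct esr_context *esr_ctx = (struct esr_context *)head;
                    /* esr_ctx->esr holds the ESR value for the fault */
            }
            head = (struct _aarch64_ctx *)((char *)head + head->size);
    }
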
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index b3fc9f5ec6d3..3491c638f172 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -23,6 +23,7 @@
#include <linux/syscalls.h>
#include <linux/ratelimit.h>
+#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/signal32.h>
#include <asm/uaccess.h>
@@ -81,6 +82,8 @@ struct compat_vfp_sigframe {
#define VFP_MAGIC 0x56465001
#define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe)
+#define FSR_WRITE_SHIFT (11)
+
struct compat_aux_sigframe {
struct compat_vfp_sigframe vfp;
@@ -219,7 +222,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
* Note that this also saves V16-31, which aren't visible
* in AArch32.
*/
- fpsimd_save_state(fpsimd);
+ fpsimd_preserve_current_state();
/* Place structure header on the stack */
__put_user_error(magic, &frame->magic, err);
@@ -282,11 +285,8 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
* We don't need to touch the exception register, so
* reload the hardware state.
*/
- if (!err) {
- preempt_disable();
- fpsimd_load_state(&fpsimd);
- preempt_enable();
- }
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
return err ? -EFAULT : 0;
}
@@ -500,7 +500,9 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
__put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
- __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err);
+ /* set the compat FSR WnR */
+ __put_user_error(!!(current->thread.fault_code & ESR_EL1_WRITE) <<
+ FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index f0a141dd5655..40f38f46c8e0 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -35,6 +35,7 @@
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
+#include <linux/irq_work.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -62,6 +63,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
IPI_TIMER,
+ IPI_IRQ_WORK,
};
/*
@@ -228,6 +230,19 @@ int __cpu_disable(void)
return 0;
}
+static int op_cpu_kill(unsigned int cpu)
+{
+ /*
+ * If we have no means of synchronising with the dying CPU, then assume
+ * that it is really dead. We can only wait for an arbitrary length of
+ * time and hope that it's dead, so let's skip the wait and just hope.
+ */
+ if (!cpu_ops[cpu]->cpu_kill)
+ return 1;
+
+ return cpu_ops[cpu]->cpu_kill(cpu);
+}
+
static DECLARE_COMPLETION(cpu_died);
/*
@@ -241,6 +256,15 @@ void __cpu_die(unsigned int cpu)
return;
}
pr_notice("CPU%u: shutdown\n", cpu);
+
+ /*
+ * Now that the dying CPU is beyond the point of no return w.r.t.
+ * in-kernel synchronisation, try to get the firmware to help us
+ * verify that it has really left the kernel before we consider
+ * clobbering anything it might still be using.
+ */
+ if (!op_cpu_kill(cpu))
+ pr_warn("CPU%d may not have shut down cleanly\n", cpu);
}
/*
@@ -455,6 +479,14 @@ void arch_send_call_function_single_ipi(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ if (smp_cross_call)
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
static const char *ipi_types[NR_IPI] = {
#define S(x,s) [x - IPI_RESCHEDULE] = s
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
@@ -462,6 +494,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
S(IPI_TIMER, "Timer broadcast interrupts"),
+ S(IPI_IRQ_WORK, "IRQ work interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -554,6 +587,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
#endif
+#ifdef CONFIG_IRQ_WORK
+ case IPI_IRQ_WORK:
+ irq_enter();
+ irq_work_run();
+ irq_exit();
+ break;
+#endif
+
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
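
With IPI_IRQ_WORK wired up, irq_work queued on a CPU now runs promptly in
hard interrupt context rather than waiting for the next timer tick. A
minimal usage sketch (names hypothetical):

    #include <linux/irq_work.h>

    static void my_irq_work_fn(struct irq_work *work)
    {
            /* runs in hard-irq context, raised via the new IPI */
    }

    static struct irq_work my_irq_work;

    /* e.g. from an NMI-like or deeply-locked context: */
    init_irq_work(&my_irq_work, my_irq_work_fn);
    irq_work_queue(&my_irq_work);
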
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7a530d2cc807..0347d38eea29 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -30,7 +30,6 @@ extern void secondary_holding_pen(void);
volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
-static DEFINE_RAW_SPINLOCK(boot_lock);
/*
* Write secondary_holding_pen_release in a way that is guaranteed to be
@@ -94,14 +93,6 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
static int smp_spin_table_cpu_boot(unsigned int cpu)
{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
/*
* Update the pen release flag.
*/
@@ -112,34 +103,7 @@ static int smp_spin_table_cpu_boot(unsigned int cpu)
*/
sev();
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
-
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
-}
-
-static void smp_spin_table_cpu_postboot(void)
-{
- /*
- * Let the primary processor know we're out of the pen.
- */
- write_pen_release(INVALID_HWID);
-
- /*
- * Synchronise with the boot thread.
- */
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ return 0;
}
const struct cpu_operations smp_spin_table_ops = {
@@ -147,5 +111,4 @@ const struct cpu_operations smp_spin_table_ops = {
.cpu_init = smp_spin_table_cpu_init,
.cpu_prepare = smp_spin_table_cpu_prepare,
.cpu_boot = smp_spin_table_cpu_boot,
- .cpu_postboot = smp_spin_table_cpu_postboot,
};
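
For context, the handshake this simplification leaves behind: the boot CPU
writes the target's HWID into secondary_holding_pen_release and issues
sev(); the secondary, still in head.S, spins roughly like this hedged C
rendering until it sees its own id:

    /* secondary CPU, really implemented in assembly in head.S: */
    while (secondary_holding_pen_release != my_hwid)
            wfe();          /* woken by the boot CPU's sev() */
    /* ...then branch to the secondary entry point */
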
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 38f0558f0c0a..55437ba1f5a4 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -35,7 +35,7 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
-int unwind_frame(struct stackframe *frame)
+int notrace unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 6815987b50f8..1a7125c3099b 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -18,6 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clockchips.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -69,6 +70,8 @@ void __init time_init(void)
of_clk_init(NULL);
clocksource_of_init();
+ tick_setup_hrtimer_broadcast();
+
arch_timer_rate = arch_timer_get_rate();
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 3e06b0be4ec8..43514f905916 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -17,10 +17,192 @@
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
+#include <linux/of.h>
#include <linux/sched.h>
#include <asm/topology.h>
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ of_node_put(cpu_node);
+ return cpu;
+ }
+ }
+
+ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+
+ of_node_put(cpu_node);
+ return -1;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%s: Can't get CPU for thread\n",
+ t->full_name);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%s: Core has both threads and CPU\n",
+ core->full_name);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ static int cluster_id __initdata;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%s: cpu-map children should be clusters\n",
+ c->full_name);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, cluster_id, core_id++);
+ } else {
+ pr_err("%s: Non-leaf cluster with core %s\n",
+ cluster->full_name, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%s: empty cluster\n", cluster->full_name);
+
+ if (leaf)
+ cluster_id++;
+
+ return 0;
+}
+
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn, *map;
+ int ret = 0;
+ int cpu;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided, cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu) {
+ if (cpu_topology[cpu].cluster_id == -1) {
+ pr_err("CPU%d: No topology information specified\n",
+ cpu);
+ ret = -EINVAL;
+ }
+ }
+
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
+}
+
/*
* cpu topology table
*/
@@ -39,13 +221,9 @@ static void update_siblings_masks(unsigned int cpuid)
if (cpuid_topo->cluster_id == -1) {
/*
- * DT does not contain topology information for this cpu
- * reset it to default behaviour
+ * DT does not contain topology information for this cpu.
*/
pr_debug("CPU%u: No topology information configured\n", cpuid);
- cpuid_topo->core_id = 0;
- cpumask_set_cpu(cpuid, &cpuid_topo->core_sibling);
- cpumask_set_cpu(cpuid, &cpuid_topo->thread_sibling);
return;
}
@@ -74,22 +252,32 @@ void store_cpu_topology(unsigned int cpuid)
update_siblings_masks(cpuid);
}
-/*
- * init_cpu_topology is called at boot when only one cpu is running
- * which prevent simultaneous write access to cpu_topology array
- */
-void __init init_cpu_topology(void)
+static void __init reset_cpu_topology(void)
{
unsigned int cpu;
- /* init core mask and power*/
for_each_possible_cpu(cpu) {
struct cpu_topology *cpu_topo = &cpu_topology[cpu];
cpu_topo->thread_id = -1;
- cpu_topo->core_id = -1;
+ cpu_topo->core_id = 0;
cpu_topo->cluster_id = -1;
+
cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}
}
+
+void __init init_cpu_topology(void)
+{
+ reset_cpu_topology();
+
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
+ if (parse_dt_topology())
+ reset_cpu_topology();
+}
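
The parser above consumes the cpu-map topology binding: cluster%d nodes
containing core%d nodes, which either carry a cpu phandle directly or
contain thread%d nodes that do. An illustrative devicetree fragment (not
from this patch):

    cpus {
            cpu-map {
                    cluster0 {
                            core0 { cpu = <&cpu0>; };
                            core1 { cpu = <&cpu1>; };
                    };
                    cluster1 {
                            core0 {
                                    thread0 { cpu = <&cpu2>; };
                                    thread1 { cpu = <&cpu3>; };
                            };
                    };
            };
    };
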
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 7ffadddb645d..c43cfa9b8304 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -251,10 +251,13 @@ void die(const char *str, struct pt_regs *regs, int err)
void arm64_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, int err)
{
- if (user_mode(regs))
+ if (user_mode(regs)) {
+ current->thread.fault_address = 0;
+ current->thread.fault_code = err;
force_sig_info(info->si_signo, info, current);
- else
+ } else {
die(str, regs, err);
+ }
}
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 4ba7a55b49c7..f1e6d5c032e1 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -13,7 +13,7 @@
#define ARM_EXIT_DISCARD(x) x
OUTPUT_ARCH(aarch64)
-ENTRY(stext)
+ENTRY(_text)
jiffies = jiffies_64;