Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile          5
-rw-r--r--  arch/arm64/kernel/acpi.c          345
-rw-r--r--  arch/arm64/kernel/alternative.c    55
-rw-r--r--  arch/arm64/kernel/asm-offsets.c     5
-rw-r--r--  arch/arm64/kernel/cpu_errata.c     47
-rw-r--r--  arch/arm64/kernel/cpu_ops.c         2
-rw-r--r--  arch/arm64/kernel/cpufeature.c     47
-rw-r--r--  arch/arm64/kernel/cpuidle.c         2
-rw-r--r--  arch/arm64/kernel/cpuinfo.c         1
-rw-r--r--  arch/arm64/kernel/cputable.c       33
-rw-r--r--  arch/arm64/kernel/entry.S          20
-rw-r--r--  arch/arm64/kernel/entry32.S        18
-rw-r--r--  arch/arm64/kernel/head.S          261
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c   2
-rw-r--r--  arch/arm64/kernel/insn.c           81
-rw-r--r--  arch/arm64/kernel/pci.c            25
-rw-r--r--  arch/arm64/kernel/perf_event.c     78
-rw-r--r--  arch/arm64/kernel/psci.c          112
-rw-r--r--  arch/arm64/kernel/setup.c          76
-rw-r--r--  arch/arm64/kernel/signal.c          7
-rw-r--r--  arch/arm64/kernel/smp.c             7
-rw-r--r--  arch/arm64/kernel/sys32.c           1
-rw-r--r--  arch/arm64/kernel/time.c            7
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S    17
24 files changed, 910 insertions(+), 344 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5ee07eee80c2..426d0763c81b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -12,12 +12,12 @@ CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
-arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
+arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \
- alternative.o cacheinfo.o
+ cpufeature.o alternative.o cacheinfo.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o entry32.o \
@@ -35,6 +35,7 @@ arm64-obj-$(CONFIG_KGDB) += kgdb.o
arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
arm64-obj-$(CONFIG_PCI) += pci.o
arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
+arm64-obj-$(CONFIG_ACPI) += acpi.o
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
new file mode 100644
index 000000000000..8b839558838e
--- /dev/null
+++ b/arch/arm64/kernel/acpi.c
@@ -0,0 +1,345 @@
+/*
+ * ARM64 Specific Low-Level ACPI Boot Support
+ *
+ * Copyright (C) 2013-2014, Linaro Ltd.
+ * Author: Al Stone <al.stone@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ * Author: Naresh Bhat <naresh.bhat@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/acpi.h>
+#include <linux/bootmem.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/smp.h>
+
+#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+
+int acpi_noirq = 1; /* skip ACPI IRQ initialization */
+int acpi_disabled = 1;
+EXPORT_SYMBOL(acpi_disabled);
+
+int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
+EXPORT_SYMBOL(acpi_pci_disabled);
+
+/* Processors with the enabled flag set and a sane MPIDR */
+static int enabled_cpus;
+
+/* Whether the boot CPU has a valid entry in the MADT */
+static bool bootcpu_valid __initdata;
+
+static bool param_acpi_off __initdata;
+static bool param_acpi_force __initdata;
+
+static int __init parse_acpi(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ /* "acpi=off" disables both ACPI table parsing and interpreter */
+ if (strcmp(arg, "off") == 0)
+ param_acpi_off = true;
+ else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
+ param_acpi_force = true;
+ else
+ return -EINVAL; /* Core will print when we return error */
+
+ return 0;
+}
+early_param("acpi", parse_acpi);
+
+static int __init dt_scan_depth1_nodes(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ /*
+ * Return 1 as soon as we encounter a node at depth 1 that is
+ * not the /chosen node.
+ */
+ if (depth == 1 && (strcmp(uname, "chosen") != 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * __acpi_map_table() will be called before paging_init(), so early_ioremap()
+ * or early_memremap() should be called here for ACPI table mapping.
+ */
+char *__init __acpi_map_table(unsigned long phys, unsigned long size)
+{
+ if (!size)
+ return NULL;
+
+ return early_memremap(phys, size);
+}
+
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+ if (!map || !size)
+ return;
+
+ early_memunmap(map, size);
+}
+
+/**
+ * acpi_map_gic_cpu_interface - generate a logical CPU number
+ * and map it to the MPIDR represented by the GICC structure
+ */
+static void __init
+acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+{
+ int i;
+ u64 mpidr = processor->arm_mpidr & MPIDR_HWID_BITMASK;
+ bool enabled = !!(processor->flags & ACPI_MADT_ENABLED);
+
+ if (mpidr == INVALID_HWID) {
+ pr_info("Skip MADT cpu entry with invalid MPIDR\n");
+ return;
+ }
+
+ total_cpus++;
+ if (!enabled)
+ return;
+
+ if (enabled_cpus >= NR_CPUS) {
+ pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
+ NR_CPUS, total_cpus, mpidr);
+ return;
+ }
+
+ /* Check if GICC structure of boot CPU is available in the MADT */
+ if (cpu_logical_map(0) == mpidr) {
+ if (bootcpu_valid) {
+ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
+ mpidr);
+ return;
+ }
+
+ bootcpu_valid = true;
+ }
+
+ /*
+ * Duplicate MPIDRs are a recipe for disaster. Scan
+ * all initialized entries and check for
+ * duplicates. If any is found just ignore the CPU.
+ */
+ for (i = 1; i < enabled_cpus; i++) {
+ if (cpu_logical_map(i) == mpidr) {
+ pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
+ mpidr);
+ return;
+ }
+ }
+
+ if (!acpi_psci_present())
+ return;
+
+ cpu_ops[enabled_cpus] = cpu_get_ops("psci");
+ /* CPU 0 was already initialized */
+ if (enabled_cpus) {
+ if (!cpu_ops[enabled_cpus])
+ return;
+
+ if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
+ return;
+
+ /* map the logical cpu id to cpu MPIDR */
+ cpu_logical_map(enabled_cpus) = mpidr;
+ }
+
+ enabled_cpus++;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+ acpi_map_gic_cpu_interface(processor);
+ return 0;
+}
+
+/* Parse GIC cpu interface entries in MADT for SMP init */
+void __init acpi_init_cpus(void)
+{
+ int count, i;
+
+ /*
+ * do a partial walk of MADT to determine how many CPUs
+ * we have, including disabled CPUs, and get the information
+ * we need for SMP init
+ */
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_gic_cpu_interface, 0);
+
+ if (!count) {
+ pr_err("No GIC CPU interface entries present\n");
+ return;
+ } else if (count < 0) {
+ pr_err("Error parsing GIC CPU interface entry\n");
+ return;
+ }
+
+ if (!bootcpu_valid) {
+ pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
+ return;
+ }
+
+ for (i = 0; i < enabled_cpus; i++)
+ set_cpu_possible(i, true);
+
+ /* Make boot-up look pretty */
+ pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
+}
+
+/*
+ * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
+ * checks on it
+ *
+ * Return 0 on success, <0 on failure
+ */
+static int __init acpi_fadt_sanity_check(void)
+{
+ struct acpi_table_header *table;
+ struct acpi_table_fadt *fadt;
+ acpi_status status;
+ acpi_size tbl_size;
+ int ret = 0;
+
+ /*
+ * FADT is required on arm64; retrieve it to check its presence
+ * and carry out revision and ACPI HW reduced compliancy tests
+ */
+ status = acpi_get_table_with_size(ACPI_SIG_FADT, 0, &table, &tbl_size);
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get FADT table, %s\n", msg);
+ return -ENODEV;
+ }
+
+ fadt = (struct acpi_table_fadt *)table;
+
+ /*
+ * The revision in the table header is the FADT major revision,
+ * and the FADT minor revision was introduced by ACPI 5.1. We only
+ * deal with ACPI 5.1 or newer revisions, in order to get the GIC
+ * and SMP boot protocol configuration data.
+ */
+ if (table->revision < 5 ||
+ (table->revision == 5 && fadt->minor_revision < 1)) {
+ pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
+ table->revision, fadt->minor_revision);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
+ pr_err("FADT not ACPI hardware reduced compliant\n");
+ ret = -EINVAL;
+ }
+
+out:
+ /*
+ * acpi_get_table_with_size() creates FADT table mapping that
+ * should be released after parsing and before resuming boot
+ */
+ early_acpi_os_unmap_memory(table, tbl_size);
+ return ret;
+}
+
+/*
+ * acpi_boot_table_init() is called from setup_arch(), always.
+ * 1. find RSDP and get its address, and then find XSDT
+ * 2. extract all tables and checksum them all
+ * 3. check ACPI FADT revision
+ * 4. check ACPI FADT HW reduced flag
+ *
+ * We can parse ACPI boot-time tables such as MADT after
+ * this function is called.
+ *
+ * On return ACPI is enabled if either:
+ *
+ * - ACPI tables are initialized and sanity checks passed
+ * - acpi=force was passed in the command line and ACPI was not disabled
+ * explicitly through acpi=off command line parameter
+ *
+ * ACPI is disabled on function return otherwise
+ */
+void __init acpi_boot_table_init(void)
+{
+ /*
+ * Enable ACPI instead of device tree unless
+ * - ACPI has been disabled explicitly (acpi=off), or
+ * - the device tree is not empty (it has more than just a /chosen node)
+ * and ACPI has not been force enabled (acpi=force)
+ */
+ if (param_acpi_off ||
+ (!param_acpi_force && of_scan_flat_dt(dt_scan_depth1_nodes, NULL)))
+ return;
+
+ /*
+ * ACPI is disabled at this point. Enable it in order to parse
+ * the ACPI tables and carry out sanity checks
+ */
+ enable_acpi();
+
+ /*
+ * If ACPI tables are initialized and FADT sanity checks passed,
+ * leave ACPI enabled and carry on booting; otherwise disable ACPI
+ * on initialization error.
+ * If acpi=force was passed on the command line it forces ACPI
+ * to be enabled even if its initialization failed.
+ */
+ if (acpi_table_init() || acpi_fadt_sanity_check()) {
+ pr_err("Failed to init ACPI tables\n");
+ if (!param_acpi_force)
+ disable_acpi();
+ }
+}
+
+void __init acpi_gic_init(void)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ acpi_size tbl_size;
+ int err;
+
+ if (acpi_disabled)
+ return;
+
+ status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
+ if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get MADT table, %s\n", msg);
+ return;
+ }
+
+ err = gic_v2_acpi_init(table);
+ if (err)
+ pr_err("Failed to initialize GIC IRQ controller");
+
+ early_acpi_os_unmap_memory((char *)table, tbl_size);
+}
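The FADT gate in acpi_fadt_sanity_check() above accepts only ACPI 5.1+ hardware-reduced platforms. A minimal standalone C sketch of that check (the ACPI_FADT_HW_REDUCED bit position and the sample values are assumptions for illustration, not taken from this patch):

#include <stdio.h>

#define ACPI_FADT_HW_REDUCED (1 << 20)  /* assumed flag bit, for illustration */

struct fadt_view {
	int major;          /* table->revision */
	int minor;          /* fadt->minor_revision */
	unsigned int flags; /* fadt->flags */
};

static int fadt_ok(const struct fadt_view *f)
{
	if (f->major < 5 || (f->major == 5 && f->minor < 1))
		return 0;	/* pre-5.1: no GIC/SMP boot data */
	if (!(f->flags & ACPI_FADT_HW_REDUCED))
		return 0;	/* arm64 requires HW-reduced ACPI */
	return 1;
}

int main(void)
{
	struct fadt_view v60 = { 6, 0, ACPI_FADT_HW_REDUCED };
	struct fadt_view v50 = { 5, 0, ACPI_FADT_HW_REDUCED };

	printf("6.0 hw-reduced: %s\n", fadt_ok(&v60) ? "accepted" : "rejected");
	printf("5.0 hw-reduced: %s\n", fadt_ok(&v50) ? "accepted" : "rejected");
	return 0;
}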
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index ad7821d64a1d..21033bba9390 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,6 +24,7 @@
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
+#include <asm/insn.h>
#include <linux/stop_machine.h>
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
@@ -33,6 +34,48 @@ struct alt_region {
struct alt_instr *end;
};
+/*
+ * Decode the imm field of a b/bl instruction, and return the byte
+ * offset as a signed value (so it can be used when computing a new
+ * branch target).
+ */
+static s32 get_branch_offset(u32 insn)
+{
+ s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+
+ /* sign-extend the immediate before turning it into a byte offset */
+ return (imm << 6) >> 4;
+}
+
+static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
+{
+ u32 insn;
+
+ aarch64_insn_read(altinsnptr, &insn);
+
+ /* Stop the world on instructions we don't support... */
+ BUG_ON(aarch64_insn_is_cbz(insn));
+ BUG_ON(aarch64_insn_is_cbnz(insn));
+ BUG_ON(aarch64_insn_is_bcond(insn));
+ /* ... and there is probably more. */
+
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
+ enum aarch64_insn_branch_type type;
+ unsigned long target;
+
+ if (aarch64_insn_is_b(insn))
+ type = AARCH64_INSN_BRANCH_NOLINK;
+ else
+ type = AARCH64_INSN_BRANCH_LINK;
+
+ target = (unsigned long)altinsnptr + get_branch_offset(insn);
+ insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
+ target, type);
+ }
+
+ return insn;
+}
+
static int __apply_alternatives(void *alt_region)
{
struct alt_instr *alt;
@@ -40,16 +83,24 @@ static int __apply_alternatives(void *alt_region)
u8 *origptr, *replptr;
for (alt = region->begin; alt < region->end; alt++) {
+ u32 insn;
+ int i;
+
if (!cpus_have_cap(alt->cpufeature))
continue;
- BUG_ON(alt->alt_len > alt->orig_len);
+ BUG_ON(alt->alt_len != alt->orig_len);
pr_info_once("patching kernel code\n");
origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
- memcpy(origptr, replptr, alt->alt_len);
+
+ for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
+ insn = get_alt_insn(origptr + i, replptr + i);
+ aarch64_insn_write(origptr + i, insn);
+ }
+
flush_icache_range((uintptr_t)origptr,
(uintptr_t)(origptr + alt->alt_len));
}
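The get_branch_offset() helper above relies on shift arithmetic: shifting the 26-bit branch immediate left by 6 moves its sign bit to bit 31, and the arithmetic shift right by 4 sign-extends while leaving a net << 2, which is exactly the word-to-byte scaling. A standalone sketch:

#include <stdio.h>
#include <stdint.h>

/* Sign-extend a 26-bit b/bl word immediate into a byte offset. */
static int32_t branch_offset(uint32_t imm26)
{
	return ((int32_t)(imm26 << 6)) >> 4;
}

int main(void)
{
	printf("%d\n", branch_offset(0x0000001)); /* +4 bytes (one word)   */
	printf("%d\n", branch_offset(0x2000000)); /* bit 25 set: -2^27 bytes */
	return 0;
}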
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index f7fa65d4c352..da675cc5dfae 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,7 +24,6 @@
#include <linux/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
-#include <asm/cputable.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
@@ -38,7 +37,6 @@ int main(void)
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
@@ -71,9 +69,6 @@ int main(void)
BLANK();
DEFINE(PAGE_SZ, PAGE_SIZE);
BLANK();
- DEFINE(CPU_INFO_SZ, sizeof(struct cpu_info));
- DEFINE(CPU_INFO_SETUP, offsetof(struct cpu_info, cpu_setup));
- BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index fa62637e63a8..6ffd91438560 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -16,8 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#define pr_fmt(fmt) "alternatives: " fmt
-
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -26,27 +24,11 @@
#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-/*
- * Add a struct or another datatype to the union below if you need
- * different means to detect an affected CPU.
- */
-struct arm64_cpu_capabilities {
- const char *desc;
- u16 capability;
- bool (*is_affected)(struct arm64_cpu_capabilities *);
- union {
- struct {
- u32 midr_model;
- u32 midr_range_min, midr_range_max;
- };
- };
-};
-
#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
MIDR_ARCHITECTURE_MASK)
static bool __maybe_unused
-is_affected_midr_range(struct arm64_cpu_capabilities *entry)
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
{
u32 midr = read_cpuid_id();
@@ -59,12 +41,12 @@ is_affected_midr_range(struct arm64_cpu_capabilities *entry)
}
#define MIDR_RANGE(model, min, max) \
- .is_affected = is_affected_midr_range, \
+ .matches = is_affected_midr_range, \
.midr_model = model, \
.midr_range_min = min, \
.midr_range_max = max
-struct arm64_cpu_capabilities arm64_errata[] = {
+const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
defined(CONFIG_ARM64_ERRATUM_824069)
@@ -88,7 +70,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A57 r0p0 - r1p2 */
.desc = "ARM erratum 832075",
.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 2),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_845719
+ {
+ /* Cortex-A53 r0p[01234] */
+ .desc = "ARM erratum 845719",
+ .capability = ARM64_WORKAROUND_845719,
+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
},
#endif
{
@@ -97,15 +88,5 @@ struct arm64_cpu_capabilities arm64_errata[] = {
void check_local_cpu_errata(void)
{
- struct arm64_cpu_capabilities *cpus = arm64_errata;
- int i;
-
- for (i = 0; cpus[i].desc; i++) {
- if (!cpus[i].is_affected(&cpus[i]))
- continue;
-
- if (!cpus_have_cap(cpus[i].capability))
- pr_info("enabling workaround for %s\n", cpus[i].desc);
- cpus_set_cap(cpus[i].capability);
- }
+ check_cpu_capabilities(arm64_errata, "enabling workaround for");
}
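The new erratum-832075 upper bound encodes r1p2 as (1 << MIDR_VARIANT_SHIFT) | 2 because the variant field occupies MIDR bits [23:20] while the revision sits in bits [3:0]; the old literal 0x12 compared against the wrong bit layout. A standalone sketch of the range test (the masks mirror the arm64 definitions):

#include <stdio.h>
#include <stdint.h>

#define MIDR_REVISION_MASK	0xf
#define MIDR_VARIANT_SHIFT	20
#define MIDR_VARIANT_MASK	(0xf << MIDR_VARIANT_SHIFT)

static int in_range(uint32_t midr, uint32_t min, uint32_t max)
{
	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	return midr >= min && midr <= max;
}

int main(void)
{
	uint32_t r1p2 = (1 << MIDR_VARIANT_SHIFT) | 2;	/* Cortex-A57 r1p2 */

	printf("r1p2 vs old max 0x12: %d\n", in_range(r1p2, 0, 0x12)); /* 0: missed  */
	printf("r1p2 vs new max:      %d\n", in_range(r1p2, 0, r1p2)); /* 1: matched */
	return 0;
}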
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index cce952440c64..fb8ff9ba467a 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -35,7 +35,7 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = {
NULL,
};
-static const struct cpu_operations * __init cpu_get_ops(const char *name)
+const struct cpu_operations * __init cpu_get_ops(const char *name)
{
const struct cpu_operations **ops = supported_cpu_ops;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
new file mode 100644
index 000000000000..3d9967e43d89
--- /dev/null
+++ b/arch/arm64/kernel/cpufeature.c
@@ -0,0 +1,47 @@
+/*
+ * Contains CPU feature definitions
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/types.h>
+#include <asm/cpu.h>
+#include <asm/cpufeature.h>
+
+static const struct arm64_cpu_capabilities arm64_features[] = {
+ {},
+};
+
+void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+ const char *info)
+{
+ int i;
+
+ for (i = 0; caps[i].desc; i++) {
+ if (!caps[i].matches(&caps[i]))
+ continue;
+
+ if (!cpus_have_cap(caps[i].capability))
+ pr_info("%s %s\n", info, caps[i].desc);
+ cpus_set_cap(caps[i].capability);
+ }
+}
+
+void check_local_cpu_features(void)
+{
+ check_cpu_capabilities(arm64_features, "detected feature");
+}
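A standalone model of the check_cpu_capabilities() pattern introduced above, with invented names, showing the print-once-per-capability behaviour:

#include <stdbool.h>
#include <stdio.h>

/* Minimal userspace model; struct and function names are illustrative. */
struct capability {
	const char *desc;
	unsigned short cap;
	bool (*matches)(const struct capability *);
};

static bool always(const struct capability *c) { return true; }

static bool cap_bits[8];	/* stands in for the cpu_hwcaps bitmap */

static void check_capabilities(const struct capability *caps, const char *info)
{
	for (int i = 0; caps[i].desc; i++) {
		if (!caps[i].matches(&caps[i]))
			continue;
		if (!cap_bits[caps[i].cap])
			printf("%s %s\n", info, caps[i].desc);
		cap_bits[caps[i].cap] = true;
	}
}

int main(void)
{
	const struct capability feats[] = {
		{ "example feature", 0, always },
		{},
	};
	check_capabilities(feats, "detected feature:");
	check_capabilities(feats, "detected feature:"); /* prints nothing */
	return 0;
}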
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 5c0896647fd1..a78143a5c99f 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -15,7 +15,7 @@
#include <asm/cpuidle.h>
#include <asm/cpu_ops.h>
-int cpu_init_idle(unsigned int cpu)
+int arm_cpuidle_init(unsigned int cpu)
{
int ret = -EOPNOTSUPP;
struct device_node *cpu_node = of_cpu_device_node_get(cpu);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 929855691dae..75d5a867e7fb 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -236,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
cpuinfo_detect_icache_policy(info);
check_local_cpu_errata();
+ check_local_cpu_features();
update_cpu_features(info);
}
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
deleted file mode 100644
index fd3993cb060f..000000000000
--- a/arch/arm64/kernel/cputable.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm64/kernel/cputable.c
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-
-#include <asm/cputable.h>
-
-extern unsigned long __cpu_setup(void);
-
-struct cpu_info cpu_table[] = {
- {
- .cpu_id_val = 0x000f0000,
- .cpu_id_mask = 0x000f0000,
- .cpu_name = "AArch64 Processor",
- .cpu_setup = __cpu_setup,
- },
- { /* Empty */ },
-};
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cf21bb3bf752..959fe8733560 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -21,8 +21,10 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
@@ -120,6 +122,24 @@
ct_user_enter
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
+
+#ifdef CONFIG_ARM64_ERRATUM_845719
+ alternative_insn \
+ "nop", \
+ "tbz x22, #4, 1f", \
+ ARM64_WORKAROUND_845719
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+ alternative_insn \
+ "nop; nop", \
+ "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
+ ARM64_WORKAROUND_845719
+#else
+ alternative_insn \
+ "nop", \
+ "msr contextidr_el1, xzr; 1:", \
+ ARM64_WORKAROUND_845719
+#endif
+#endif
.endif
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
index 9a8f6ae2530e..bd9bfaa9269b 100644
--- a/arch/arm64/kernel/entry32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -19,9 +19,12 @@
*/
#include <linux/linkage.h>
+#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/page.h>
/*
* System call wrappers for the AArch32 compatibility layer.
@@ -54,6 +57,21 @@ ENTRY(compat_sys_fstatfs64_wrapper)
ENDPROC(compat_sys_fstatfs64_wrapper)
/*
+ * Note: off_4k (w5) is always in units of 4K. If we can't do the
+ * requested offset because it is not page-aligned, we return -EINVAL.
+ */
+ENTRY(compat_sys_mmap2_wrapper)
+#if PAGE_SHIFT > 12
+ tst w5, #~PAGE_MASK >> 12
+ b.ne 1f
+ lsr w5, w5, #PAGE_SHIFT - 12
+#endif
+ b sys_mmap_pgoff
+1: mov x0, #-EINVAL
+ ret
+ENDPROC(compat_sys_mmap2_wrapper)
+
+/*
* Wrappers for AArch32 syscalls that either take 64-bit parameters
* in registers or that take 32-bit parameters which require sign
* extension.
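In C terms, the compat_sys_mmap2_wrapper() above rescales the 4K-unit offset to kernel pages and rejects offsets that are not page-aligned; a sketch assuming a 64K-page configuration (PAGE_SHIFT = 16):

#include <stdio.h>
#include <errno.h>

#define PAGE_SHIFT 16	/* models a 64K-page kernel */

static long mmap2_pgoff(unsigned long off_4k)
{
#if PAGE_SHIFT > 12
	if (off_4k & ((1UL << (PAGE_SHIFT - 12)) - 1))
		return -EINVAL;			/* not page-aligned */
	off_4k >>= PAGE_SHIFT - 12;
#endif
	return (long)off_4k;	/* pgoff passed on to sys_mmap_pgoff() */
}

int main(void)
{
	printf("%ld\n", mmap2_pgoff(32));	/* 32 * 4K = 128K -> pgoff 2 */
	printf("%ld\n", mmap2_pgoff(3));	/* 12K not 64K-aligned -> -22 */
	return 0;
}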
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 07f930540f4a..19f915e8f6e0 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -36,7 +36,7 @@
#include <asm/page.h>
#include <asm/virt.h>
-#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
+#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET)
#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
@@ -46,13 +46,6 @@
#error TEXT_OFFSET must be less than 2MB
#endif
- .macro pgtbl, ttb0, ttb1, virt_to_phys
- ldr \ttb1, =swapper_pg_dir
- ldr \ttb0, =idmap_pg_dir
- add \ttb1, \ttb1, \virt_to_phys
- add \ttb0, \ttb0, \virt_to_phys
- .endm
-
#ifdef CONFIG_ARM64_64K_PAGES
#define BLOCK_SHIFT PAGE_SHIFT
#define BLOCK_SIZE PAGE_SIZE
@@ -63,7 +56,7 @@
#define TABLE_SHIFT PUD_SHIFT
#endif
-#define KERNEL_START KERNEL_RAM_VADDR
+#define KERNEL_START _text
#define KERNEL_END _end
/*
@@ -240,40 +233,43 @@ section_table:
#endif
ENTRY(stext)
- mov x21, x0 // x21=FDT
+ bl preserve_boot_args
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
- bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ adrp x24, __PHYS_OFFSET
bl set_cpu_boot_mode_flag
- mrs x22, midr_el1 // x22=cpuid
- mov x0, x22
- bl lookup_processor_type
- mov x23, x0 // x23=current cpu_table
- /*
- * __error_p may end up out of range for cbz if text areas are
- * aligned up to section sizes.
- */
- cbnz x23, 1f // invalid processor (x23=0)?
- b __error_p
-1:
+
bl __vet_fdt
bl __create_page_tables // x25=TTBR0, x26=TTBR1
/*
- * The following calls CPU specific code in a position independent
- * manner. See arch/arm64/mm/proc.S for details. x23 = base of
- * cpu_info structure selected by lookup_processor_type above.
+ * The following calls CPU setup code, see arch/arm64/mm/proc.S for
+ * details.
* On return, the CPU will be ready for the MMU to be turned on and
* the TCR will have been set.
*/
- ldr x27, __switch_data // address to jump to after
+ ldr x27, =__mmap_switched // address to jump to after
// MMU has been enabled
- adrp lr, __enable_mmu // return (PIC) address
- add lr, lr, #:lo12:__enable_mmu
- ldr x12, [x23, #CPU_INFO_SETUP]
- add x12, x12, x28 // __virt_to_phys
- br x12 // initialise processor
+ adr_l lr, __enable_mmu // return (PIC) address
+ b __cpu_setup // initialise processor
ENDPROC(stext)
/*
+ * Preserve the arguments passed by the bootloader in x0 .. x3
+ */
+preserve_boot_args:
+ mov x21, x0 // x21=FDT
+
+ adr_l x0, boot_args // record the contents of
+ stp x21, x1, [x0] // x0 .. x3 at kernel entry
+ stp x2, x3, [x0, #16]
+
+ dmb sy // needed before dc ivac with
+ // MMU off
+
+ add x1, x0, #0x20 // 4 x 8 bytes
+ b __inval_cache_range // tail call
+ENDPROC(preserve_boot_args)
+
+/*
* Determine validity of the x21 FDT pointer.
* The dtb must be 8-byte aligned and live in the first 512M of memory.
*/
@@ -356,7 +352,8 @@ ENDPROC(__vet_fdt)
* - pgd entry for fixed mappings (TTBR1)
*/
__create_page_tables:
- pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
+ adrp x25, idmap_pg_dir
+ adrp x26, swapper_pg_dir
mov x27, lr
/*
@@ -385,12 +382,50 @@ __create_page_tables:
* Create the identity mapping.
*/
mov x0, x25 // idmap_pg_dir
- ldr x3, =KERNEL_START
- add x3, x3, x28 // __pa(KERNEL_START)
+ adrp x3, KERNEL_START // __pa(KERNEL_START)
+
+#ifndef CONFIG_ARM64_VA_BITS_48
+#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT))
+
+ /*
+ * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
+ * created that covers system RAM if that is located sufficiently high
+ * in the physical address space. So for the ID map, use an extended
+ * virtual range in that case, by configuring an additional translation
+ * level.
+ * First, we have to verify our assumption that the current value of
+ * VA_BITS was chosen such that all translation levels are fully
+ * utilised, and that lowering T0SZ will always result in an additional
+ * translation level to be configured.
+ */
+#if VA_BITS != EXTRA_SHIFT
+#error "Mismatch between VA_BITS and page size/number of translation levels"
+#endif
+
+ /*
+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
+ * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+ * this number conveniently equals the number of leading zeroes in
+ * the physical address of KERNEL_END.
+ */
+ adrp x5, KERNEL_END
+ clz x5, x5
+ cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
+ b.ge 1f // .. then skip additional level
+
+ adr_l x6, idmap_t0sz
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
+ create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
+1:
+#endif
+
create_pgd_entry x0, x3, x5, x6
- ldr x6, =KERNEL_END
mov x5, x3 // __pa(KERNEL_START)
- add x6, x6, x28 // __pa(KERNEL_END)
+ adr_l x6, KERNEL_END // __pa(KERNEL_END)
create_block_map x0, x7, x3, x5, x6
/*
@@ -399,7 +434,7 @@ __create_page_tables:
mov x0, x26 // swapper_pg_dir
mov x5, #PAGE_OFFSET
create_pgd_entry x0, x5, x3, x6
- ldr x6, =KERNEL_END
+ ldr x6, =KERNEL_END // __va(KERNEL_END)
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -426,6 +461,7 @@ __create_page_tables:
*/
mov x0, x25
add x1, x26, #SWAPPER_DIR_SIZE
+ dmb sy
bl __inval_cache_range
mov lr, x27
@@ -433,37 +469,22 @@ __create_page_tables:
ENDPROC(__create_page_tables)
.ltorg
- .align 3
- .type __switch_data, %object
-__switch_data:
- .quad __mmap_switched
- .quad __bss_start // x6
- .quad __bss_stop // x7
- .quad processor_id // x4
- .quad __fdt_pointer // x5
- .quad memstart_addr // x6
- .quad init_thread_union + THREAD_START_SP // sp
-
/*
- * The following fragment of code is executed with the MMU on in MMU mode, and
- * uses absolute addresses; this is not position independent.
+ * The following fragment of code is executed with the MMU enabled.
*/
+ .set initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
- adr x3, __switch_data + 8
+ adr_l x6, __bss_start
+ adr_l x7, __bss_stop
- ldp x6, x7, [x3], #16
1: cmp x6, x7
b.hs 2f
str xzr, [x6], #8 // Clear BSS
b 1b
2:
- ldp x4, x5, [x3], #16
- ldr x6, [x3], #8
- ldr x16, [x3]
- mov sp, x16
- str x22, [x4] // Save processor ID
- str x21, [x5] // Save FDT pointer
- str x24, [x6] // Save PHYS_OFFSET
+ adr_l sp, initial_sp, x4
+ str_l x21, __fdt_pointer, x5 // Save FDT pointer
+ str_l x24, memstart_addr, x6 // Save PHYS_OFFSET
mov x29, #0
b start_kernel
ENDPROC(__mmap_switched)
@@ -566,8 +587,7 @@ ENDPROC(el2_setup)
* in x20. See arch/arm64/include/asm/virt.h for more info.
*/
ENTRY(set_cpu_boot_mode_flag)
- ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
- add x1, x1, x28
+ adr_l x1, __boot_cpu_mode
cmp w20, #BOOT_CPU_MODE_EL2
b.ne 1f
add x1, x1, #4
@@ -588,29 +608,21 @@ ENDPROC(set_cpu_boot_mode_flag)
.align L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL2
- .long 0
+ .long BOOT_CPU_MODE_EL1
.popsection
#ifdef CONFIG_SMP
- .align 3
-1: .quad .
- .quad secondary_holding_pen_release
-
/*
 * This provides a "holding pen" where all secondary cores are held
 * until we're ready for them to initialise.
*/
ENTRY(secondary_holding_pen)
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
- bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
ldr x1, =MPIDR_HWID_BITMASK
and x0, x0, x1
- adr x1, 1b
- ldp x2, x3, [x1]
- sub x1, x1, x2
- add x3, x3, x1
+ adr_l x3, secondary_holding_pen_release
pen: ldr x4, [x3]
cmp x4, x0
b.eq secondary_startup
@@ -624,7 +636,6 @@ ENDPROC(secondary_holding_pen)
*/
ENTRY(secondary_entry)
bl el2_setup // Drop to EL1
- bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
bl set_cpu_boot_mode_flag
b secondary_startup
ENDPROC(secondary_entry)
@@ -633,16 +644,9 @@ ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
*/
- mrs x22, midr_el1 // x22=cpuid
- mov x0, x22
- bl lookup_processor_type
- mov x23, x0 // x23=current cpu_table
- cbz x23, __error_p // invalid processor (x23=0)?
-
- pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
- ldr x12, [x23, #CPU_INFO_SETUP]
- add x12, x12, x28 // __virt_to_phys
- blr x12 // initialise processor
+ adrp x25, idmap_pg_dir
+ adrp x26, swapper_pg_dir
+ bl __cpu_setup // initialise processor
ldr x21, =secondary_data
ldr x27, =__secondary_switched // address to jump to after enabling the MMU
@@ -658,11 +662,12 @@ ENDPROC(__secondary_switched)
#endif /* CONFIG_SMP */
/*
- * Setup common bits before finally enabling the MMU. Essentially this is just
- * loading the page table pointer and vector base registers.
+ * Enable the MMU.
*
- * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
- * the MMU.
+ * x0 = SCTLR_EL1 value for turning on the MMU.
+ * x27 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
*/
__enable_mmu:
ldr x5, =vectors
@@ -670,89 +675,7 @@ __enable_mmu:
msr ttbr0_el1, x25 // load TTBR0
msr ttbr1_el1, x26 // load TTBR1
isb
- b __turn_mmu_on
-ENDPROC(__enable_mmu)
-
-/*
- * Enable the MMU. This completely changes the structure of the visible memory
- * space. You will not be able to trace execution through this.
- *
- * x0 = system control register
- * x27 = *virtual* address to jump to upon completion
- *
- * other registers depend on the function called upon completion
- *
- * We align the entire function to the smallest power of two larger than it to
- * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
- * close to the end of a 512MB or 1GB block we might require an additional
- * table to map the entire function.
- */
- .align 4
-__turn_mmu_on:
msr sctlr_el1, x0
isb
br x27
-ENDPROC(__turn_mmu_on)
-
-/*
- * Calculate the start of physical memory.
- */
-__calc_phys_offset:
- adr x0, 1f
- ldp x1, x2, [x0]
- sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET
- add x24, x2, x28 // x24 = PHYS_OFFSET
- ret
-ENDPROC(__calc_phys_offset)
-
- .align 3
-1: .quad .
- .quad PAGE_OFFSET
-
-/*
- * Exception handling. Something went wrong and we can't proceed. We ought to
- * tell the user, but since we don't have any guarantee that we're even
- * running on the right architecture, we do virtually nothing.
- */
-__error_p:
-ENDPROC(__error_p)
-
-__error:
-1: nop
- b 1b
-ENDPROC(__error)
-
-/*
- * This function gets the processor ID in w0 and searches the cpu_table[] for
- * a match. It returns a pointer to the struct cpu_info it found. The
- * cpu_table[] must end with an empty (all zeros) structure.
- *
- * This routine can be called via C code and it needs to work with the MMU
- * both disabled and enabled (the offset is calculated automatically).
- */
-ENTRY(lookup_processor_type)
- adr x1, __lookup_processor_type_data
- ldp x2, x3, [x1]
- sub x1, x1, x2 // get offset between VA and PA
- add x3, x3, x1 // convert VA to PA
-1:
- ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask
- cbz w5, 2f // end of list?
- and w6, w6, w0
- cmp w5, w6
- b.eq 3f
- add x3, x3, #CPU_INFO_SZ
- b 1b
-2:
- mov x3, #0 // unknown processor
-3:
- mov x0, x3
- ret
-ENDPROC(lookup_processor_type)
-
- .align 3
- .type __lookup_processor_type_data, %object
-__lookup_processor_type_data:
- .quad .
- .quad cpu_table
- .size __lookup_processor_type_data, . - __lookup_processor_type_data
+ENDPROC(__enable_mmu)
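The idmap T0SZ computation above exploits the fact that T0SZ == 64 - (bits used), so the largest T0SZ that still covers the kernel image equals the leading-zero count of __pa(KERNEL_END) — which is what the clz instruction computes. A standalone sketch (the VA_BITS choice and the sample address are illustrative):

#include <stdio.h>
#include <stdint.h>

#define VA_BITS		39
#define TCR_T0SZ(bits)	(64 - (bits))

int main(void)
{
	uint64_t pa_kernel_end = 0x0000008000a00000ULL; /* example: RAM above 512GB */
	int max_t0sz = __builtin_clzll(pa_kernel_end);

	if (max_t0sz >= TCR_T0SZ(VA_BITS))
		printf("default T0SZ=%d is enough\n", TCR_T0SZ(VA_BITS));
	else
		printf("need T0SZ=%d: extra idmap translation level required\n",
		       max_t0sz);
	return 0;
}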
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 98bbe06e469c..e7d934d3afe0 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* Disallow per-task kernel breakpoints since these would
* complicate the stepping code.
*/
- if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
+ if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
return -EINVAL;
return 0;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index c8eca88f12e6..924902083e47 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -265,23 +265,13 @@ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
-u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
- u32 insn, u64 imm)
+static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
+ u32 *maskp, int *shiftp)
{
- u32 immlo, immhi, lomask, himask, mask;
+ u32 mask;
int shift;
switch (type) {
- case AARCH64_INSN_IMM_ADR:
- lomask = 0x3;
- himask = 0x7ffff;
- immlo = imm & lomask;
- imm >>= 2;
- immhi = imm & himask;
- imm = (immlo << 24) | (immhi);
- mask = (lomask << 24) | (himask);
- shift = 5;
- break;
case AARCH64_INSN_IMM_26:
mask = BIT(26) - 1;
shift = 0;
@@ -320,9 +310,68 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
shift = 16;
break;
default:
- pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
+ return -EINVAL;
+ }
+
+ *maskp = mask;
+ *shiftp = shift;
+
+ return 0;
+}
+
+#define ADR_IMM_HILOSPLIT 2
+#define ADR_IMM_SIZE SZ_2M
+#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_LOSHIFT 29
+#define ADR_IMM_HISHIFT 5
+
+u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
+{
+ u32 immlo, immhi, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
+ immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
+ insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
+ mask = ADR_IMM_SIZE - 1;
+ break;
+ default:
+ if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
+ pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+ }
+
+ return (insn >> shift) & mask;
+}
+
+u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm)
+{
+ u32 immlo, immhi, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ shift = 0;
+ immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
+ imm >>= ADR_IMM_HILOSPLIT;
+ immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
+ imm = immlo | immhi;
+ mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
+ (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
+ break;
+ default:
+ if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
}
/* Update the immediate field. */
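The ADR immediate is split across the instruction word: immlo (2 bits) at bits [30:29] and immhi (19 bits) at bits [23:5]. A standalone encode/decode roundtrip using the same split (the 0x10000000 base opcode in main() is ADR x0 with a zero immediate):

#include <stdio.h>
#include <stdint.h>

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		(1UL << 21)			/* SZ_2M */
#define ADR_IMM_LOMASK		((1U << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((uint32_t)(ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

static uint32_t encode_adr(uint32_t insn, uint64_t imm)
{
	uint32_t immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
	uint32_t immhi = ((imm >> ADR_IMM_HILOSPLIT) & ADR_IMM_HIMASK)
			 << ADR_IMM_HISHIFT;
	uint32_t mask = (ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT);

	return (insn & ~mask) | immlo | immhi;
}

static uint64_t decode_adr(uint32_t insn)
{
	uint32_t immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
	uint32_t immhi = (insn >> ADR_IM M_HISHIFT) & ADR_IMM_HIMASK;

	return ((immhi << ADR_IMM_HILOSPLIT) | immlo) & (ADR_IMM_SIZE - 1);
}

int main(void)
{
	uint64_t imm = 0x12345;				/* 21-bit immediate */
	uint32_t insn = encode_adr(0x10000000, imm);	/* ADR x0, . */

	printf("roundtrip: %#llx\n", (unsigned long long)decode_adr(insn));
	return 0;
}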
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 6f93c24ca801..4095379dc069 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -10,6 +10,7 @@
*
*/
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -46,3 +47,27 @@ int pcibios_add_device(struct pci_dev *dev)
return 0;
}
+
+/*
+ * raw_pci_read/write - Platform-specific PCI config space access.
+ */
+int raw_pci_read(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 *val)
+{
+ return -ENXIO;
+}
+
+int raw_pci_write(unsigned int domain, unsigned int bus,
+ unsigned int devfn, int reg, int len, u32 val)
+{
+ return -ENXIO;
+}
+
+#ifdef CONFIG_ACPI
+/* Root bridge scanning */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+ /* TODO: Should be revisited when implementing PCI on ACPI */
+ return NULL;
+}
+#endif
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 25a5308744b1..195991dadc37 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,8 +25,10 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -322,22 +324,31 @@ out:
}
static int
-validate_event(struct pmu_hw_events *hw_events,
- struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+ struct perf_event *event)
{
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct arm_pmu *armpmu;
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event))
return 1;
+ /*
+ * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+ * core perf code won't check that the pmu->ctx == leader->ctx
+ * until after pmu->event_init(event).
+ */
+ if (event->pmu != pmu)
+ return 0;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
+ armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}
@@ -355,15 +366,15 @@ validate_group(struct perf_event *event)
memset(fake_used_mask, 0, sizeof(fake_used_mask));
fake_pmu.used_mask = fake_used_mask;
- if (!validate_event(&fake_pmu, leader))
+ if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
- if (!validate_event(&fake_pmu, sibling))
+ if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
}
- if (!validate_event(&fake_pmu, event))
+ if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL;
return 0;
@@ -396,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
free_percpu_irq(irq, &cpu_hw_events);
} else {
for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ int cpu = i;
+
+ if (armpmu->irq_affinity)
+ cpu = armpmu->irq_affinity[i];
+
+ if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq > 0)
@@ -450,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
} else {
for (i = 0; i < irqs; ++i) {
+ int cpu = i;
+
err = 0;
irq = platform_get_irq(pmu_device, i);
if (irq <= 0)
continue;
+ if (armpmu->irq_affinity)
+ cpu = armpmu->irq_affinity[i];
+
/*
* If we have a single PMU interrupt that we can't shift,
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
+ irq, cpu);
continue;
}
@@ -476,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return err;
}
- cpumask_set_cpu(i, &armpmu->active_irqs);
+ cpumask_set_cpu(cpu, &armpmu->active_irqs);
}
}
@@ -1289,9 +1310,46 @@ static const struct of_device_id armpmu_of_device_ids[] = {
static int armpmu_device_probe(struct platform_device *pdev)
{
+ int i, *irqs;
+
if (!cpu_pmu)
return -ENODEV;
+ irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < pdev->num_resources; ++i) {
+ struct device_node *dn;
+ int cpu;
+
+ dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+ i);
+ if (!dn) {
+ pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+ of_node_full_name(dn), i);
+ break;
+ }
+
+ for_each_possible_cpu(cpu)
+ if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+ break;
+
+ of_node_put(dn);
+ if (cpu >= nr_cpu_ids) {
+ pr_warn("Failed to find logical CPU for %s\n",
+ dn->name);
+ break;
+ }
+
+ irqs[i] = cpu;
+ }
+
+ if (i == pdev->num_resources)
+ cpu_pmu->irq_affinity = irqs;
+ else
+ kfree(irqs);
+
cpu_pmu->plat_device = pdev;
return 0;
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 9b8a70ae64a1..ea18cb53921e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "psci: " fmt
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
@@ -24,6 +25,7 @@
#include <linux/slab.h>
#include <uapi/linux/psci.h>
+#include <asm/acpi.h>
#include <asm/compiler.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
@@ -273,39 +275,8 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
-/*
- * PSCI Function IDs for v0.2+ are well defined so use
- * standard values.
- */
-static int __init psci_0_2_init(struct device_node *np)
+static void __init psci_0_2_set_functions(void)
{
- int err, ver;
-
- err = get_set_conduit_method(np);
-
- if (err)
- goto out_put_node;
-
- ver = psci_get_version();
-
- if (ver == PSCI_RET_NOT_SUPPORTED) {
- /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
- pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
- err = -EOPNOTSUPP;
- goto out_put_node;
- } else {
- pr_info("PSCIv%d.%d detected in firmware.\n",
- PSCI_VERSION_MAJOR(ver),
- PSCI_VERSION_MINOR(ver));
-
- if (PSCI_VERSION_MAJOR(ver) == 0 &&
- PSCI_VERSION_MINOR(ver) < 2) {
- err = -EINVAL;
- pr_err("Conflicting PSCI version detected.\n");
- goto out_put_node;
- }
- }
-
pr_info("Using standard PSCI v0.2 function IDs\n");
psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
psci_ops.cpu_suspend = psci_cpu_suspend;
@@ -329,6 +300,60 @@ static int __init psci_0_2_init(struct device_node *np)
arm_pm_restart = psci_sys_reset;
pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+ int ver = psci_get_version();
+
+ if (ver == PSCI_RET_NOT_SUPPORTED) {
+ /*
+	 * PSCI versions >= 0.2 mandate implementation of
+ * PSCI_VERSION.
+ */
+ pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
+ return -EOPNOTSUPP;
+ } else {
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 &&
+ PSCI_VERSION_MINOR(ver) < 2) {
+ pr_err("Conflicting PSCI version detected.\n");
+ return -EINVAL;
+ }
+ }
+
+ psci_0_2_set_functions();
+
+ return 0;
+}
+
+/*
+ * PSCI init function for PSCI versions >=0.2
+ *
+ * Probe based on the PSCI PSCI_VERSION function
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+ int err;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+ /*
+ * Starting with v0.2, the PSCI specification introduced a call
+ * (PSCI_VERSION) that allows probing the firmware version, so
+ * that PSCI function IDs and version specific initialization
+ * can be carried out according to the specific version reported
+ * by firmware
+ */
+ err = psci_probe();
out_put_node:
of_node_put(np);
@@ -381,7 +406,7 @@ static const struct of_device_id psci_of_match[] __initconst = {
{},
};
-int __init psci_init(void)
+int __init psci_dt_init(void)
{
struct device_node *np;
const struct of_device_id *matched_np;
@@ -396,6 +421,27 @@ int __init psci_init(void)
return init_fn(np);
}
+/*
+ * PSCI 0.2+ is required when ACPI is deployed on ARM64, as
+ * explicitly stated in the SBBR.
+ */
+int __init psci_acpi_init(void)
+{
+ if (!acpi_psci_present()) {
+ pr_info("is not implemented in ACPI.\n");
+ return -EOPNOTSUPP;
+ }
+
+ pr_info("probing for conduit method from ACPI.\n");
+
+ if (acpi_psci_use_hvc())
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ else
+ invoke_psci_fn = __invoke_psci_fn_smc;
+
+ return psci_probe();
+}
+
#ifdef CONFIG_SMP
static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
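A standalone model of the psci_probe() version gate above; PSCI_VERSION packs the major revision in bits [31:16] and the minor in bits [15:0], and PSCI_RET_NOT_SUPPORTED (-1) means the call itself is absent:

#include <stdio.h>

#define PSCI_RET_NOT_SUPPORTED	(-1)
#define PSCI_VERSION_MAJOR(v)	(((v) >> 16) & 0xffff)
#define PSCI_VERSION_MINOR(v)	((v) & 0xffff)

static int probe(int ver)
{
	if (ver == PSCI_RET_NOT_SUPPORTED)
		return -1;	/* firmware is not v0.2 compliant */
	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2)
		return -1;	/* conflicting version */
	return 0;		/* use the standard v0.2 function IDs */
}

int main(void)
{
	printf("v0.2: %d\n", probe(0x00000002));	/* accepted (0)  */
	printf("v0.1: %d\n", probe(0x00000001));	/* rejected (-1) */
	return 0;
}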
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index e8420f635bd4..74753132c3ac 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
@@ -46,11 +47,11 @@
#include <linux/efi.h>
#include <linux/personality.h>
+#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
-#include <asm/cputable.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/sections.h>
@@ -62,9 +63,7 @@
#include <asm/memblock.h>
#include <asm/psci.h>
#include <asm/efi.h>
-
-unsigned int processor_id;
-EXPORT_SYMBOL(processor_id);
+#include <asm/virt.h>
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -83,7 +82,6 @@ unsigned int compat_elf_hwcap2 __read_mostly;
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
-static const char *cpu_name;
phys_addr_t __fdt_pointer __initdata;
/*
@@ -119,6 +117,11 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+/*
+ * The recorded values of x0 .. x3 upon kernel entry.
+ */
+u64 __cacheline_aligned boot_args[4];
+
void __init smp_setup_processor_id(void)
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
@@ -207,24 +210,38 @@ static void __init smp_build_mpidr_hash(void)
}
#endif
+static void __init hyp_mode_check(void)
+{
+ if (is_hyp_mode_available())
+ pr_info("CPU: All CPU(s) started at EL2\n");
+ else if (is_hyp_mode_mismatched())
+ WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+ "CPU: CPUs started in inconsistent modes");
+ else
+ pr_info("CPU: All CPU(s) started at EL1\n");
+}
+
+void __init do_post_cpus_up_work(void)
+{
+ hyp_mode_check();
+ apply_alternatives_all();
+}
+
+#ifdef CONFIG_UP_LATE_INIT
+void __init up_late_init(void)
+{
+ do_post_cpus_up_work();
+}
+#endif /* CONFIG_UP_LATE_INIT */
+
static void __init setup_processor(void)
{
- struct cpu_info *cpu_info;
u64 features, block;
u32 cwg;
int cls;
- cpu_info = lookup_processor_type(read_cpuid_id());
- if (!cpu_info) {
- printk("CPU configuration botched (ID %08x), unable to continue.\n",
- read_cpuid_id());
- while (1);
- }
-
- cpu_name = cpu_info->cpu_name;
-
- printk("CPU: %s [%08x] revision %d\n",
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
+ printk("CPU: AArch64 Processor [%08x] revision %d\n",
+ read_cpuid_id(), read_cpuid_id() & 15);
sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0;
@@ -380,18 +397,27 @@ void __init setup_arch(char **cmdline_p)
efi_init();
arm64_memblock_init();
+ /* Parse the ACPI tables for possible boot-time configuration */
+ acpi_boot_table_init();
+
paging_init();
request_standard_resources();
early_ioremap_reset();
- unflatten_device_tree();
-
- psci_init();
+ if (acpi_disabled) {
+ unflatten_device_tree();
+ psci_dt_init();
+ cpu_read_bootcpu_ops();
+#ifdef CONFIG_SMP
+ of_smp_init_cpus();
+#endif
+ } else {
+ psci_acpi_init();
+ acpi_init_cpus();
+ }
- cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
- smp_init_cpus();
smp_build_mpidr_hash();
#endif
@@ -402,6 +428,12 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
#endif
+ if (boot_args[1] || boot_args[2] || boot_args[3]) {
+ pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
+ "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
+ "This indicates a broken bootloader or old kernel\n",
+ boot_args[1], boot_args[2], boot_args[3]);
+ }
}
static int __init arm64_device_init(void)
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 660ccf9f7524..e18c48cb6db1 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -287,19 +287,12 @@ static void setup_restart_syscall(struct pt_regs *regs)
*/
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
- struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
sigset_t *oldset = sigmask_to_save();
int usig = ksig->sig;
int ret;
/*
- * translate the signal
- */
- if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
- usig = thread->exec_domain->signal_invmap[usig];
-
- /*
* Set up the stack frame
*/
if (is_compat_task()) {
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 328b8ce4b007..2cb008177252 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void)
*/
cpu_set_reserved_ttbr0();
flush_tlb_all();
+ cpu_set_default_tcr_t0sz();
preempt_disable();
trace_hardirqs_off();
@@ -309,7 +310,7 @@ void cpu_die(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
- apply_alternatives_all();
+ do_post_cpus_up_work();
}
void __init smp_prepare_boot_cpu(void)
@@ -322,7 +323,7 @@ void __init smp_prepare_boot_cpu(void)
* cpu logical map array containing MPIDR values related to logical
* cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
-void __init smp_init_cpus(void)
+void __init of_smp_init_cpus(void)
{
struct device_node *dn = NULL;
unsigned int i, cpu = 1;
@@ -635,7 +636,7 @@ void smp_send_stop(void)
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
- cpu_clear(smp_processor_id(), mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
smp_cross_call(&mask, IPI_CPU_STOP);
}
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index 2d5ab3c90b82..a40b1343b819 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -37,6 +37,7 @@ asmlinkage long compat_sys_readahead_wrapper(void);
asmlinkage long compat_sys_fadvise64_64_wrapper(void);
asmlinkage long compat_sys_sync_file_range2_wrapper(void);
asmlinkage long compat_sys_fallocate_wrapper(void);
+asmlinkage long compat_sys_mmap2_wrapper(void);
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym,
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 1a7125c3099b..42f9195cf2f8 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clk-provider.h>
+#include <linux/acpi.h>
#include <clocksource/arm_arch_timer.h>
@@ -72,6 +73,12 @@ void __init time_init(void)
tick_setup_hrtimer_broadcast();
+ /*
+	 * Since only one of ACPI or FDT will be available in the
+	 * system, we can safely call acpi_generic_timer_init() here.
+ */
+ acpi_generic_timer_init();
+
arch_timer_rate = arch_timer_get_rate();
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 5d9d2dca530d..a2c29865c3fe 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -23,10 +23,14 @@ jiffies = jiffies_64;
#define HYPERVISOR_TEXT \
/* \
- * Force the alignment to be compatible with \
- * the vectors requirements \
+ * Align to 4 KB so that \
+ * a) the HYP vector table is at its minimum \
+ * alignment of 2048 bytes \
+ * b) the HYP init code will not cross a page \
+ * boundary if its size does not exceed \
+ * 4 KB (see related ASSERT() below) \
*/ \
- . = ALIGN(2048); \
+ . = ALIGN(SZ_4K); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
@@ -163,10 +167,11 @@ SECTIONS
}
/*
- * The HYP init code can't be more than a page long.
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
*/
-ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
- "HYP init code too big")
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+ "HYP init code too big or misaligned")
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
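The reworked ASSERT above checks two things at once: the HYP init code fits in 4K and does not straddle a page boundary. A standalone sketch of the same predicate:

#include <stdio.h>
#include <stdint.h>

#define SZ_4K 0x1000UL

/* End minus the page-aligned-down start must not exceed one page. */
static int hyp_idmap_ok(uint64_t start, uint64_t end)
{
	return end - (start & ~(SZ_4K - 1)) <= SZ_4K;
}

int main(void)
{
	/* 2K of code entirely inside one page: fine. */
	printf("%d\n", hyp_idmap_ok(0x40000800, 0x40001000));
	/* 2K of code crossing a page boundary: rejected even though small. */
	printf("%d\n", hyp_idmap_ok(0x40000c00, 0x40001400));
	return 0;
}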