Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/cacheinfo.c            | 13
-rw-r--r--  arch/arm64/kernel/cpufeature.c           |  1
-rw-r--r--  arch/arm64/kernel/fpsimd.c               | 42
-rw-r--r--  arch/arm64/kernel/hibernate.c            |  3
-rw-r--r--  arch/arm64/kernel/module.c               | 38
-rw-r--r--  arch/arm64/kernel/paravirt.c             |  9
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.c   | 10
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.h   | 10
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c       | 11
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.c | 10
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.h | 10
-rw-r--r--  arch/arm64/kernel/psci.c                 |  9
-rw-r--r--  arch/arm64/kernel/smccc-call.S           | 11
-rw-r--r--  arch/arm64/kernel/sys.c                  | 16
-rw-r--r--  arch/arm64/kernel/sys32.c                |  7
-rw-r--r--  arch/arm64/kernel/traps.c                |  5
16 files changed, 90 insertions(+), 115 deletions(-)
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 0bf0a835122f..880d79904d36 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM64 cacheinfo support
*
* Copyright (C) 2015 ARM Ltd.
* All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ca27e08e3d8a..80babf451519 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -830,6 +830,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
read_sysreg_case(SYS_ID_AA64PFR0_EL1);
read_sysreg_case(SYS_ID_AA64PFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR1_EL1);
read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index a38bf74bcca8..bb42cd04baec 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
+#include <linux/swab.h>
#include <asm/esr.h>
#include <asm/fpsimd.h>
@@ -352,6 +353,23 @@ static int __init sve_sysctl_init(void) { return 0; }
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ u64 a = swab64(x);
+ u64 b = swab64(x >> 64);
+
+ return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+ return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
/*
* Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
* task->thread.sve_state.
@@ -369,14 +387,16 @@ static void fpsimd_to_sve(struct task_struct *task)
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
+ __uint128_t *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i)
- memcpy(ZREG(sst, vq, i), &fst->vregs[i],
- sizeof(fst->vregs[i]));
+ for (i = 0; i < 32; ++i) {
+ p = (__uint128_t *)ZREG(sst, vq, i);
+ *p = arm64_cpu_to_le128(fst->vregs[i]);
+ }
}
/*
@@ -395,14 +415,16 @@ static void sve_to_fpsimd(struct task_struct *task)
void const *sst = task->thread.sve_state;
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
+ __uint128_t const *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i)
- memcpy(&fst->vregs[i], ZREG(sst, vq, i),
- sizeof(fst->vregs[i]));
+ for (i = 0; i < 32; ++i) {
+ p = (__uint128_t const *)ZREG(sst, vq, i);
+ fst->vregs[i] = arm64_le128_to_cpu(*p);
+ }
}
#ifdef CONFIG_ARM64_SVE
@@ -491,6 +513,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
+ __uint128_t *p;
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
@@ -499,9 +522,10 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
- for (i = 0; i < 32; ++i)
- memcpy(ZREG(sst, vq, i), &fst->vregs[i],
- sizeof(fst->vregs[i]));
+ for (i = 0; i < 32; ++i) {
+ p = (__uint128_t *)ZREG(sst, vq, i);
+ *p = arm64_cpu_to_le128(fst->vregs[i]);
+ }
}
int sve_set_vector_length(struct task_struct *task,
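
[Note: the following is an illustrative userspace sketch, not part of the patch. It models the 128-bit byte swap that arm64_cpu_to_le128() performs above: byte-reverse each 64-bit half and exchange the halves, which is the same as reversing all 16 bytes. The names cpu_to_le128 and bswap64 here are invented for the example; the kernel version only does the swap when CONFIG_CPU_BIG_ENDIAN is set.]

#include <stdint.h>
#include <stdio.h>

/* Portable 64-bit byte reversal, standing in for the kernel's swab64(). */
static uint64_t bswap64(uint64_t v)
{
	v = ((v & 0x00ff00ff00ff00ffULL) << 8)  | ((v >> 8)  & 0x00ff00ff00ff00ffULL);
	v = ((v & 0x0000ffff0000ffffULL) << 16) | ((v >> 16) & 0x0000ffff0000ffffULL);
	return (v << 32) | (v >> 32);
}

/* Same structure as the big-endian arm64_cpu_to_le128() in the hunk above. */
static __uint128_t cpu_to_le128(__uint128_t x)
{
	uint64_t a = bswap64((uint64_t)x);         /* low half, byte-reversed  */
	uint64_t b = bswap64((uint64_t)(x >> 64)); /* high half, byte-reversed */

	return ((__uint128_t)a << 64) | b;
}

int main(void)
{
	__uint128_t v  = ((__uint128_t)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;
	__uint128_t le = cpu_to_le128(v);

	/* The 16 bytes come out in reversed order across both halves. */
	printf("high: %016llx low: %016llx\n",
	       (unsigned long long)(le >> 64), (unsigned long long)le);
	return 0;
}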
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 9859e1178e6b..9341fcc6e809 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*:
* Hibernate support specific for ARM64
*
@@ -11,8 +12,6 @@
* https://patchwork.kernel.org/patch/96442/
*
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f32359cffb01..dd080837e6a9 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -98,10 +98,10 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
/*
* The ELF psABI for AArch64 documents the 16-bit and 32-bit place
- * relative relocations as having a range of [-2^15, 2^16) or
- * [-2^31, 2^32), respectively. However, in order to be able to detect
- * overflows reliably, we have to choose whether we interpret such
- * quantities as signed or as unsigned, and stick with it.
+ * relative and absolute relocations as having a range of [-2^15, 2^16)
+ * or [-2^31, 2^32), respectively. However, in order to be able to
+ * detect overflows reliably, we have to choose whether we interpret
+ * such quantities as signed or as unsigned, and stick with it.
* The way we organize our address space requires a signed
* interpretation of 32-bit relative references, so let's use that
* for all R_AARCH64_PRELxx relocations. This means our upper
@@ -111,13 +111,35 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
switch (len) {
case 16:
*(s16 *)place = sval;
- if (sval < S16_MIN || sval > S16_MAX)
- return -ERANGE;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U16_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S16_MIN || sval > S16_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 16-bit data relocation (%d)\n", op);
+ return 0;
+ }
break;
case 32:
*(s32 *)place = sval;
- if (sval < S32_MIN || sval > S32_MAX)
- return -ERANGE;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U32_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S32_MIN || sval > S32_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 32-bit data relocation (%d)\n", op);
+ return 0;
+ }
break;
case 64:
*(s64 *)place = sval;
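
[Note: a minimal standalone sketch, not taken from the patch, of the range policy the comment above describes for 16-bit data relocations: an absolute (ABS) relocation must land in [0, U16_MAX], a place-relative (PREL) one in [S16_MIN, S16_MAX]. The demo_ names are invented for illustration.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum demo_reloc_op { DEMO_RELOC_OP_ABS, DEMO_RELOC_OP_PREL };

/* Accept the signed value if it fits the range chosen for this reloc type. */
static bool reloc16_in_range(enum demo_reloc_op op, int64_t sval)
{
	switch (op) {
	case DEMO_RELOC_OP_ABS:
		return sval >= 0 && sval <= UINT16_MAX;
	case DEMO_RELOC_OP_PREL:
		return sval >= INT16_MIN && sval <= INT16_MAX;
	}
	return false;
}

int main(void)
{
	/* 0xfffe fits an unsigned 16-bit field but overflows a signed one. */
	printf("ABS  0xfffe in range: %d\n", reloc16_in_range(DEMO_RELOC_OP_ABS, 0xfffe));
	printf("PREL 0xfffe in range: %d\n", reloc16_in_range(DEMO_RELOC_OP_PREL, 0xfffe));
	return 0;
}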
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 75c158b0353f..4cfed91fe256 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2013 Citrix Systems
*
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 6bf6657a5a52..b78fac9e546c 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/decode-insn.c
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
index 192ab007bacb..8b758c5a2062 100644
--- a/arch/arm64/kernel/probes/decode-insn.h
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm64/kernel/probes/decode-insn.h
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ARM_KERNEL_KPROBES_ARM64_H
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2509fcb6d404..88ce502c8e6f 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/kprobes.c
*
@@ -5,16 +6,6 @@
*
* Copyright (C) 2013 Linaro Limited.
* Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#include <linux/kasan.h>
#include <linux/kernel.h>
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index be05868418ee..25f67ec59635 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm64/kernel/probes/simulate-insn.c
*
* Copyright (C) 2013 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/bitops.h>
diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h
index 050bde683c2d..e065dc92218e 100644
--- a/arch/arm64/kernel/probes/simulate-insn.h
+++ b/arch/arm64/kernel/probes/simulate-insn.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm64/kernel/probes/simulate-insn.h
*
* Copyright (C) 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 8cdaf25e99cd..85ee7d07889e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2013 ARM Limited
*
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index 184332286a81..54655273d1e0 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License Version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 6f91e8116514..fe20c461582a 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
return ksys_personality(personality);
}
+asmlinkage long sys_ni_syscall(void);
+
+asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
+{
+ return sys_ni_syscall();
+}
+
/*
* Wrappers to pass the pt_regs argument.
*/
-#define sys_personality sys_arm64_personality
-
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall sys_ni_syscall
+#define __arm64_sys_personality __arm64_sys_arm64_personality
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+ [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd.h>
};
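
[Note: a simplified, hypothetical model of the typing issue fixed above, not kernel code. Every entry in the syscall table shares the pt_regs-taking prototype, so the "not implemented" entry gets a wrapper of that exact type instead of a function-pointer cast; all demo_ names are invented.]

#include <stdio.h>

struct demo_pt_regs { long regs[31]; };

typedef long (*demo_syscall_fn_t)(const struct demo_pt_regs *regs);

/* Stand-in for sys_ni_syscall(), which just returns -ENOSYS. */
static long demo_ni_syscall(void)
{
	return -38; /* -ENOSYS */
}

/* Wrapper with the table's prototype, mirroring __arm64_sys_ni_syscall(). */
static long demo_wrap_ni_syscall(const struct demo_pt_regs *regs)
{
	(void)regs;
	return demo_ni_syscall();
}

/* Default-fill the table with the correctly typed wrapper (GNU range initializer). */
static const demo_syscall_fn_t demo_call_table[4] = {
	[0 ... 3] = demo_wrap_ni_syscall,
};

int main(void)
{
	struct demo_pt_regs regs = { { 0 } };

	printf("%ld\n", demo_call_table[2](&regs));
	return 0;
}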
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index 0f8bcb7de700..3c80a40c1c9d 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -133,17 +133,14 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall sys_ni_syscall
-
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
- [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+ [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd32.h>
};
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index e6be1a6efc0a..177c0f6ebabf 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -252,7 +252,10 @@ void arm64_force_sig_fault(int signo, int code, void __user *addr,
const char *str)
{
arm64_show_signal(signo, str);
- force_sig_fault(signo, code, addr, current);
+ if (signo == SIGKILL)
+ force_sig(SIGKILL, current);
+ else
+ force_sig_fault(signo, code, addr, current);
}
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,