Diffstat (limited to 'arch/arm64/kernel/process.c')
-rw-r--r--  arch/arm64/kernel/process.c  39
1 file changed, 17 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 92bcc1768f0b..4ae31b7af6c3 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -69,7 +69,7 @@ void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
cpu_die();
}
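
The hunk above adds the __noreturn annotation because arch_cpu_idle_dead() only calls cpu_die(), which never returns. A minimal standalone sketch of what the attribute buys (park_forever() is a hypothetical stand-in for cpu_die(), not kernel code):

/* The attribute promises the compiler that control never comes back,
 * so callers need no epilogue and fall-through bugs become warnings. */
static void __attribute__((noreturn)) park_forever(void)
{
	for (;;)
		;
}

int main(void)
{
	park_forever(); /* the compiler knows nothing after this runs */
}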
@@ -217,7 +217,7 @@ void __show_regs(struct pt_regs *regs)
if (!user_mode(regs)) {
printk("pc : %pS\n", (void *)regs->pc);
- printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
+ printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr));
} else {
printk("pc : %016llx\n", regs->pc);
printk("lr : %016llx\n", lr);
@@ -279,10 +279,6 @@ void flush_thread(void)
flush_tagged_addr_state();
}
-void release_thread(struct task_struct *dead_task)
-{
-}
-
void arch_release_task_struct(struct task_struct *tsk)
{
fpsimd_release_task(tsk);
@@ -294,9 +290,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
fpsimd_preserve_current_state();
*dst = *src;
- /* We rely on the above assignment to initialize dst's thread_flags: */
- BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
-
/*
* Detach src's sve_state (if any) from dst so that it does not
* get erroneously used or freed prematurely. dst's copies
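
The dropped BUILD_BUG_ON() was a compile-time assertion that CONFIG_THREAD_INFO_IN_TASK is enabled; arm64 selects that option unconditionally, so the check no longer guards anything. The idiom itself can be sketched in standard C11 (the kernel's BUILD_BUG_ON() reaches the same result through its own machinery):

/* Fails the build, not the boot, if the condition is true. */
#define MY_BUILD_BUG_ON(cond) _Static_assert(!(cond), #cond)

MY_BUILD_BUG_ON(sizeof(int) > sizeof(long)); /* compile-time only, no runtime cost */

int main(void)
{
	return 0;
}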
@@ -311,30 +304,33 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* In the unlikely event that we create a new thread with ZA
- * enabled we should retain the ZA state so duplicate it here.
- * This may be shortly freed if we exec() or if CLONE_SETTLS
- * but it's simpler to do it here. To avoid confusing the rest
- * of the code ensure that we have a sve_state allocated
- * whenever za_state is allocated.
+ * enabled we should retain the ZA and ZT state so duplicate
+ * it here. This may be shortly freed if we exec() or if
+ * CLONE_SETTLS but it's simpler to do it here. To avoid
+ * confusing the rest of the code ensure that we have a
+ * sve_state allocated whenever sme_state is allocated.
*/
if (thread_za_enabled(&src->thread)) {
dst->thread.sve_state = kzalloc(sve_state_size(src),
GFP_KERNEL);
if (!dst->thread.sve_state)
return -ENOMEM;
- dst->thread.za_state = kmemdup(src->thread.za_state,
- za_state_size(src),
- GFP_KERNEL);
- if (!dst->thread.za_state) {
+
+ dst->thread.sme_state = kmemdup(src->thread.sme_state,
+ sme_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sme_state) {
kfree(dst->thread.sve_state);
dst->thread.sve_state = NULL;
return -ENOMEM;
}
} else {
- dst->thread.za_state = NULL;
+ dst->thread.sme_state = NULL;
clear_tsk_thread_flag(dst, TIF_SME);
}
+ dst->thread.fp_type = FP_STATE_FPSIMD;
+
/* clear any pending asynchronous tag fault raised by the parent */
clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
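
The hunk above renames za_state to sme_state (now also covering ZT state) and keeps the same failure discipline: if duplicating the second buffer fails, the first is freed so dst is never left half-populated. A userspace sketch of that allocate-then-unwind pattern, with illustrative names (memdup() plays the role of kmemdup(), calloc() of kzalloc()):

#include <stdlib.h>
#include <string.h>

struct thread_sketch {
	void *sve_state;
	void *sme_state;
};

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

static int dup_state_sketch(struct thread_sketch *dst,
			    const struct thread_sketch *src,
			    size_t sve_len, size_t sme_len)
{
	dst->sve_state = calloc(1, sve_len);
	if (!dst->sve_state)
		return -1;

	dst->sme_state = memdup(src->sme_state, sme_len);
	if (!dst->sme_state) {
		free(dst->sve_state);	/* unwind: no half-initialised dst */
		dst->sve_state = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	char buf[16] = "parent ZA state";
	struct thread_sketch src = { NULL, buf }, dst;

	return dup_state_sketch(&dst, &src, 64, sizeof(buf));
}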
@@ -455,7 +451,7 @@ static void ssbs_thread_switch(struct task_struct *next)
* If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field.
*/
- if (cpus_have_const_cap(ARM64_SSBS))
+ if (alternative_has_cap_unlikely(ARM64_SSBS))
return;
spectre_v4_enable_task_mitigation(next);
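
cpus_have_const_cap() is replaced by alternative_has_cap_unlikely(), which bakes the capability test into the instruction stream via boot-time patching instead of loading from the cpucaps bitmap. In plain C the behaviour is roughly a branch whose direction is fixed once at boot, hinted the way the _unlikely suffix suggests (conceptual approximation only, not the alternatives framework):

static int have_ssbs; /* decided once during "boot", then never changes */

static inline int cpus_have_ssbs_sketch(void)
{
	return __builtin_expect(have_ssbs, 0); /* mirrors unlikely() */
}

int main(void)
{
	return cpus_have_ssbs_sketch() ? 0 : 1;
}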
@@ -595,7 +591,7 @@ unsigned long __get_wchan(struct task_struct *p)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= get_random_u32_below(PAGE_SIZE);
return sp & ~0xf;
}
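
get_random_u32_below(PAGE_SIZE) draws a value in [0, PAGE_SIZE), which for a power-of-two bound matches the old get_random_int() & ~PAGE_MASK masking while using the preferred bounded-random API. The surrounding arithmetic is easy to check in isolation (userspace sketch; rand() stands in for the kernel RNG and a 4 KiB page is assumed):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE_SK 4096UL

static unsigned long align_stack_sketch(unsigned long sp)
{
	sp -= (unsigned long)rand() % PAGE_SIZE_SK; /* sub-page random slide */
	return sp & ~0xfUL;                         /* keep 16-byte alignment */
}

int main(void)
{
	printf("%#lx\n", align_stack_sketch(0x7ffffffff000UL));
	return 0;
}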
@@ -725,7 +721,6 @@ static struct ctl_table tagged_addr_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
- { }
};
static int __init tagged_addr_init(void)
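
The removed { } entry was the null terminator the sysctl core once scanned for; it can go because registration now carries an explicit length (register_sysctl() wraps register_sysctl_sz() with the ARRAY_SIZE() of the table). A generic sketch of the sentinel-free iteration pattern:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE_SK(a) (sizeof(a) / sizeof((a)[0]))

struct entry { const char *name; };

static const struct entry table[] = {
	{ "tagged_addr_disabled" },	/* no empty sentinel entry needed */
};

int main(void)
{
	for (size_t i = 0; i < ARRAY_SIZE_SK(table); i++)
		printf("%s\n", table[i].name);
	return 0;
}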