author     Benjamin Berg <benjamin.berg@intel.com>    2024-11-11 11:29:10 +0100
committer  Johannes Berg <johannes.berg@intel.com>    2024-11-12 14:50:31 +0100
commit     2f681ba4b352cdd5658ed2a96062375a12839755 (patch)
tree       8cc0179eb7c009f8c155acee180270918b93af23 /arch/um/kernel
parent     um: Always dump trace for specified task in show_stack (diff)
um: move thread info into task
This selects the THREAD_INFO_IN_TASK option for UM and changes the way
that the current task is discovered. This is trivial though, as UML
already tracks the current task in cpu_tasks[] and this can be used to
retrieve it.

Also remove the signal handler code that copies the thread information
into the IRQ stack. It is obsolete now, which also means that the
mentioned race condition cannot happen anymore.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Reviewed-by: Hajime Tazaki <thehajime@gmail.com>
Link: https://patch.msgid.link/20241111102910.46512-1-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
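The key idea behind the change is that with THREAD_INFO_IN_TASK the thread_info
sits at the start of struct task_struct, so the current task (and its
thread_info) can be looked up from the per-CPU cpu_tasks[] array instead of
being derived from the bottom of the kernel stack. The standalone sketch
below models that lookup; the struct layouts and helpers are simplified
illustrations for this page, not the actual kernel definitions.

/*
 * Standalone model (not kernel code) of the THREAD_INFO_IN_TASK idea on UML:
 * thread_info lives inside the task structure, and "current" is found via a
 * per-CPU task array rather than via the stack pointer.
 */
#include <stdio.h>

struct thread_info {
	unsigned long flags;                 /* e.g. TIF_* style flags */
};

struct task_struct {
	struct thread_info thread_info;      /* must stay first with THREAD_INFO_IN_TASK */
	int pid;
};

#define NR_CPUS 1
static struct task_struct *cpu_tasks[NR_CPUS];   /* updated on context switch */

static struct task_struct *get_current(void)
{
	return cpu_tasks[0];                 /* UML effectively runs single-CPU here */
}

/* With the task known, its thread_info is simply the first member. */
static struct thread_info *current_thread_info(void)
{
	return &get_current()->thread_info;
}

int main(void)
{
	struct task_struct init = { .thread_info = { .flags = 0 }, .pid = 0 };

	cpu_tasks[0] = &init;                /* mirrors cpu_tasks[0] = &init_task */
	printf("current pid: %d, flags: %lu\n",
	       get_current()->pid, current_thread_info()->flags);
	return 0;
}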
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/dyn.lds.S      |   2
-rw-r--r--  arch/um/kernel/irq.c          | 112
-rw-r--r--  arch/um/kernel/process.c      |   5
-rw-r--r--  arch/um/kernel/skas/process.c |   4
-rw-r--r--  arch/um/kernel/um_arch.c      |   7
-rw-r--r--  arch/um/kernel/uml.lds.S      |   2
6 files changed, 7 insertions, 125 deletions
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index dc9d9a68af55..a36b7918a011 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -116,8 +116,6 @@ SECTIONS
.fini_array : { *(.fini_array) }
.data : {
INIT_TASK_DATA(KERNEL_STACK_SIZE)
- . = ALIGN(KERNEL_STACK_SIZE);
- *(.data..init_irqstack)
DATA_DATA
*(.data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 534e91797f89..338450741aac 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -674,115 +674,3 @@ void __init init_IRQ(void)
/* Initialize EPOLL Loop */
os_setup_epoll();
}
-
-/*
- * IRQ stack entry and exit:
- *
- * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
- * and switch over to the IRQ stack after some preparation. We use
- * sigaltstack to receive signals on a separate stack from the start.
- * These two functions make sure the rest of the kernel won't be too
- * upset by being on a different stack. The IRQ stack has a
- * thread_info structure at the bottom so that current et al continue
- * to work.
- *
- * to_irq_stack copies the current task's thread_info to the IRQ stack
- * thread_info and sets the tasks's stack to point to the IRQ stack.
- *
- * from_irq_stack copies the thread_info struct back (flags may have
- * been modified) and resets the task's stack pointer.
- *
- * Tricky bits -
- *
- * What happens when two signals race each other? UML doesn't block
- * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
- * could arrive while a previous one is still setting up the
- * thread_info.
- *
- * There are three cases -
- * The first interrupt on the stack - sets up the thread_info and
- * handles the interrupt
- * A nested interrupt interrupting the copying of the thread_info -
- * can't handle the interrupt, as the stack is in an unknown state
- * A nested interrupt not interrupting the copying of the
- * thread_info - doesn't do any setup, just handles the interrupt
- *
- * The first job is to figure out whether we interrupted stack setup.
- * This is done by xchging the signal mask with thread_info->pending.
- * If the value that comes back is zero, then there is no setup in
- * progress, and the interrupt can be handled. If the value is
- * non-zero, then there is stack setup in progress. In order to have
- * the interrupt handled, we leave our signal in the mask, and it will
- * be handled by the upper handler after it has set up the stack.
- *
- * Next is to figure out whether we are the outer handler or a nested
- * one. As part of setting up the stack, thread_info->real_thread is
- * set to non-NULL (and is reset to NULL on exit). This is the
- * nesting indicator. If it is non-NULL, then the stack is already
- * set up and the handler can run.
- */
-
-static unsigned long pending_mask;
-
-unsigned long to_irq_stack(unsigned long *mask_out)
-{
- struct thread_info *ti;
- unsigned long mask, old;
- int nested;
-
- mask = xchg(&pending_mask, *mask_out);
- if (mask != 0) {
- /*
- * If any interrupts come in at this point, we want to
- * make sure that their bits aren't lost by our
- * putting our bit in. So, this loop accumulates bits
- * until xchg returns the same value that we put in.
- * When that happens, there were no new interrupts,
- * and pending_mask contains a bit for each interrupt
- * that came in.
- */
- old = *mask_out;
- do {
- old |= mask;
- mask = xchg(&pending_mask, old);
- } while (mask != old);
- return 1;
- }
-
- ti = current_thread_info();
- nested = (ti->real_thread != NULL);
- if (!nested) {
- struct task_struct *task;
- struct thread_info *tti;
-
- task = cpu_tasks[ti->cpu].task;
- tti = task_thread_info(task);
-
- *ti = *tti;
- ti->real_thread = tti;
- task->stack = ti;
- }
-
- mask = xchg(&pending_mask, 0);
- *mask_out |= mask | nested;
- return 0;
-}
-
-unsigned long from_irq_stack(int nested)
-{
- struct thread_info *ti, *to;
- unsigned long mask;
-
- ti = current_thread_info();
-
- pending_mask = 1;
-
- to = ti->real_thread;
- current->stack = to;
- ti->real_thread = NULL;
- *to = *ti;
-
- mask = xchg(&pending_mask, 0);
- return mask & ~1;
-}
-
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 56e7e525fc91..30bdc0a87dc8 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -43,7 +43,8 @@
* cares about its entry, so it's OK if another processor is modifying its
* entry.
*/
-struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { NULL } };
+struct task_struct *cpu_tasks[NR_CPUS];
+EXPORT_SYMBOL(cpu_tasks);
void free_stack(unsigned long stack, int order)
{
@@ -64,7 +65,7 @@ unsigned long alloc_stack(int order, int atomic)
static inline void set_current(struct task_struct *task)
{
- cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task) { task });
+ cpu_tasks[task_thread_info(task)->cpu] = task;
}
struct task_struct *__switch_to(struct task_struct *from, struct task_struct *to)
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 68657988c8d1..05dcdc057af9 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -22,15 +22,13 @@ static int __init start_kernel_proc(void *unused)
{
block_signals_trace();
- cpu_tasks[0].task = current;
-
start_kernel();
return 0;
}
extern int userspace_pid[];
-extern char cpu0_irqstack[];
+static char cpu0_irqstack[THREAD_SIZE] __aligned(THREAD_SIZE);
int __init start_uml(void)
{
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index ec17576ce9fc..62ddb865eb91 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -65,9 +65,6 @@ struct cpuinfo_um boot_cpu_data = {
EXPORT_SYMBOL(boot_cpu_data);
-union thread_union cpu0_irqstack
- __section(".data..init_irqstack") =
- { .thread_info = INIT_THREAD_INFO(init_task) };
/* Changed in setup_arch, which is called in early boot */
static char host_info[(__NEW_UTS_LEN + 1) * 5];
@@ -244,6 +241,8 @@ static struct notifier_block panic_exit_notifier = {
void uml_finishsetup(void)
{
+ cpu_tasks[0] = &init_task;
+
atomic_notifier_chain_register(&panic_notifier_list,
&panic_exit_notifier);
@@ -418,7 +417,7 @@ void __init setup_arch(char **cmdline_p)
{
u8 rng_seed[32];
- stack_protections((unsigned long) &init_thread_info);
+ stack_protections((unsigned long) init_task.stack);
setup_physmem(uml_physmem, uml_reserved, physmem_size);
mem_total_pages(physmem_size, iomem_size);
uml_dtb_init();
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 5c92d58a78e8..a409d4b66114 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -77,8 +77,6 @@ SECTIONS
.data :
{
INIT_TASK_DATA(KERNEL_STACK_SIZE)
- . = ALIGN(KERNEL_STACK_SIZE);
- *(.data..init_irqstack)
DATA_DATA
*(.gnu.linkonce.d*)
CONSTRUCTORS