Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/entry-armv.S  | 30
 arch/arm/kernel/head.S        |  3
 arch/arm/kernel/process.c     | 25
 arch/arm/kernel/sys_arm.c     |  2
 arch/arm/kernel/traps.c       | 63
 arch/arm/kernel/vmlinux.lds.S |  3
 6 files changed, 91 insertions(+), 35 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 080df907f242..e14278d59882 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -17,8 +17,8 @@
#include <asm/glue.h>
#include <asm/vfpmacros.h>
-#include <asm/hardware.h> @ should be moved into entry-macro.S
-#include <asm/arch/irqs.h> @ should be moved into entry-macro.S
+#include <asm/hardware.h> /* should be moved into entry-macro.S */
+#include <asm/arch/irqs.h> /* should be moved into entry-macro.S */
#include <asm/arch/entry-macro.S>
#include "entry-header.S"
@@ -269,7 +269,7 @@ __pabt_svc:
add r5, sp, #S_PC
ldmia r7, {r2 - r4} @ Get USR pc, cpsr
-#if __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
@ make sure our user space atomic helper is aborted
cmp r2, #VIRT_OFFSET
bichs r3, r3, #PSR_Z_BIT
@@ -505,9 +505,9 @@ ENTRY(__switch_to)
mra r4, r5, acc0
stmia ip, {r4, r5}
#endif
-#ifdef CONFIG_HAS_TLS_REG
+#if defined(CONFIG_HAS_TLS_REG)
mcr p15, 0, r3, c13, c0, 3 @ set TLS register
-#else
+#elif !defined(CONFIG_TLS_REG_EMUL)
mov r4, #0xffff0fff
str r3, [r4, #-15] @ TLS val at 0xffff0ff0
#endif
@@ -616,11 +616,17 @@ __kuser_helper_start:
__kuser_cmpxchg: @ 0xffff0fc0
-#if __LINUX_ARM_ARCH__ < 6
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-#ifdef CONFIG_SMP /* sanity check */
-#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
-#endif
+ /*
+ * Poor you. No fast solution possible...
+ * The kernel itself must perform the operation.
+ * A special ghost syscall is used for that (see traps.c).
+ */
+ swi #0x9ffff0
+ mov pc, lr
+
+#elif __LINUX_ARM_ARCH__ < 6
/*
* Theory of operation:
@@ -690,11 +696,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
__kuser_get_tls: @ 0xffff0fe0
-#ifndef CONFIG_HAS_TLS_REG
-
-#ifdef CONFIG_SMP /* sanity check */
-#error "CONFIG_SMP without CONFIG_HAS_TLS_REG is wrong"
-#endif
+#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
mov pc, lr
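Note: the helpers above live at fixed addresses at the top of the vector page. The following is a minimal user-space sketch of how they are meant to be invoked; the typedef and function names are ours, only the addresses and register conventions come from the helper code above.

/* Illustration only: calling the kuser helpers from user space.
 * kuser_cmpxchg takes oldval in r0, newval in r1, the pointer in r2,
 * and returns zero (with the C flag set) if the store happened. */
typedef int   (*kuser_cmpxchg_fn)(int oldval, int newval, volatile int *ptr);
typedef void *(*kuser_get_tls_fn)(void);

#define kuser_cmpxchg ((kuser_cmpxchg_fn)0xffff0fc0)
#define kuser_get_tls ((kuser_get_tls_fn)0xffff0fe0)

/* Retry loop built on whichever cmpxchg variant the kernel selected:
 * LDREX/STREX, the pre-ARMv6 restartable sequence, or the new ghost
 * syscall when CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG is set. */
static int atomic_increment(volatile int *ctr)
{
        int old;
        do {
                old = *ctr;
        } while (kuser_cmpxchg(old, old + 1, ctr) != 0);
        return old + 1;
}

/* The TLS helper simply hands back the thread pointer in r0. */
static void *current_tls(void)
{
        return kuser_get_tls();
}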
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 171b3e811c71..4733877296d4 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -19,6 +19,7 @@
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/constants.h>
+#include <asm/thread_info.h>
#include <asm/system.h>
#define PROCINFO_MMUFLAGS 8
@@ -131,7 +132,7 @@ __switch_data:
.long processor_id @ r4
.long __machine_arch_type @ r5
.long cr_alignment @ r6
- .long init_thread_union+8192 @ sp
+ .long init_thread_union + THREAD_START_SP @ sp
/*
* The following fragment of code is executed with the MMU on, and uses
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 26eacd3e5def..8f146a4b4752 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -256,8 +256,6 @@ static unsigned long *thread_info_head;
static unsigned int nr_thread_info;
#define EXTRA_TASK_STRUCT 4
-#define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
-#define ll_free_task_struct(p) free_pages((unsigned long)(p),1)
struct thread_info *alloc_thread_info(struct task_struct *task)
{
@@ -274,17 +272,16 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
}
if (!thread)
- thread = ll_alloc_task_struct();
+ thread = (struct thread_info *)
+ __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
-#ifdef CONFIG_MAGIC_SYSRQ
+#ifdef CONFIG_DEBUG_STACK_USAGE
/*
* The stack must be cleared if you want SYSRQ-T to
* give sensible stack usage information
*/
- if (thread) {
- char *p = (char *)thread;
- memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
- }
+ if (thread)
+ memzero(thread, THREAD_SIZE);
#endif
return thread;
}
@@ -297,7 +294,7 @@ void free_thread_info(struct thread_info *thread)
thread_info_head = p;
nr_thread_info += 1;
} else
- ll_free_task_struct(thread);
+ free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
}
/*
@@ -350,7 +347,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
struct thread_info *thread = p->thread_info;
struct pt_regs *childregs;
- childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_SIZE - 8)) - 1;
+ childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_START_SP)) - 1;
*childregs = *regs;
childregs->ARM_r0 = 0;
childregs->ARM_sp = stack_start;
@@ -447,15 +444,17 @@ EXPORT_SYMBOL(kernel_thread);
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, lr;
- unsigned long stack_page;
+ unsigned long stack_start, stack_end;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_page = 4096 + (unsigned long)p->thread_info;
+ stack_start = (unsigned long)(p->thread_info + 1);
+ stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE;
+
fp = thread_saved_fp(p);
do {
- if (fp < stack_page || fp > 4092+stack_page)
+ if (fp < stack_start || fp > stack_end)
return 0;
lr = pc_pointer (((unsigned long *)fp)[-1]);
if (!in_sched_functions(lr))
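Note: get_wchan now bounds its frame-pointer walk by the real extent of the kernel stack instead of a fixed 4 KiB window. A sketch of those bounds, using a hypothetical helper name that is not part of the patch:

/* Hypothetical helper (illustration only): the stack bounds used by
 * the new get_wchan code, expressed with the thread_info macros. */
static void kernel_stack_bounds(struct thread_info *ti,
                                unsigned long *start, unsigned long *end)
{
        *start = (unsigned long)(ti + 1);          /* just above struct thread_info */
        *end   = (unsigned long)ti + THREAD_SIZE;  /* top of the two-page stack     */
        /* saved frame pointers must satisfy: *start <= fp <= *end */
}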
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index ef32577da304..f897ce2ccf0d 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -302,7 +302,7 @@ long execve(const char *filename, char **argv, char **envp)
"b ret_to_user"
:
: "r" (current_thread_info()),
- "Ir" (THREAD_SIZE - 8 - sizeof(regs)),
+ "Ir" (THREAD_START_SP - sizeof(regs)),
"r" (&regs),
"Ir" (sizeof(regs))
: "r0", "r1", "r2", "r3", "ip", "memory");
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3a001fe5540b..45d2a032d890 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -218,7 +218,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
tsk->comm, tsk->pid, tsk->thread_info + 1);
if (!user_mode(regs) || in_interrupt()) {
- dump_mem("Stack: ", regs->ARM_sp, 8192+(unsigned long)tsk->thread_info);
+ dump_mem("Stack: ", regs->ARM_sp,
+ THREAD_SIZE + (unsigned long)tsk->thread_info);
dump_backtrace(regs, tsk);
dump_instr(regs);
}
@@ -450,9 +451,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
case NR(set_tls):
thread->tp_value = regs->ARM_r0;
-#ifdef CONFIG_HAS_TLS_REG
+#if defined(CONFIG_HAS_TLS_REG)
asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
-#else
+#elif !defined(CONFIG_TLS_REG_EMUL)
/*
* User space must never try to access this directly.
* Expect your app to break eventually if you do so.
@@ -463,6 +464,55 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
#endif
return 0;
+#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+ /*
+ * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
+ * Return zero in r0 if *MEM was changed or non-zero if no exchange
+ * happened. Also set the user C flag accordingly.
+ * If access permissions have to be fixed up then non-zero is
+ * returned and the operation has to be re-attempted.
+ *
+ * *NOTE*: This is a ghost syscall private to the kernel. Only the
+ * __kuser_cmpxchg code in entry-armv.S should be aware of its
+ * existence. Don't ever use this from user code.
+ */
+ case 0xfff0:
+ {
+ extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs);
+ unsigned long val;
+ unsigned long addr = regs->ARM_r2;
+ struct mm_struct *mm = current->mm;
+ pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+
+ regs->ARM_cpsr &= ~PSR_C_BIT;
+ spin_lock(&mm->page_table_lock);
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ goto bad_access;
+ pmd = pmd_offset(pgd, addr);
+ if (!pmd_present(*pmd))
+ goto bad_access;
+ pte = pte_offset_map(pmd, addr);
+ if (!pte_present(*pte) || !pte_write(*pte))
+ goto bad_access;
+ val = *(unsigned long *)addr;
+ val -= regs->ARM_r0;
+ if (val == 0) {
+ *(unsigned long *)addr = regs->ARM_r1;
+ regs->ARM_cpsr |= PSR_C_BIT;
+ }
+ spin_unlock(&mm->page_table_lock);
+ return val;
+
+ bad_access:
+ spin_unlock(&mm->page_table_lock);
+ /* simulate a read access fault */
+ do_DataAbort(addr, 15 + (1 << 11), regs);
+ return -1;
+ }
+#endif
+
default:
/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
if not implemented, rather than raising SIGILL. This
@@ -497,11 +547,14 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
return 0;
}
-#if defined(CONFIG_CPU_32v6) && !defined(CONFIG_HAS_TLS_REG)
+#ifdef CONFIG_TLS_REG_EMUL
/*
* We might be running on an ARMv6+ processor which should have the TLS
- * register, but for some reason we can't use it and have to emulate it.
+ * register but for some reason we can't use it, or maybe an SMP system
+ * using a pre-ARMv6 processor (there are apparently a few prototypes like
+ * that in existence) and therefore access to that register must be
+ * emulated.
*/
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
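Note: with CONFIG_TLS_REG_EMUL the kernel traps user reads of the TLS register and emulates them. The handler body is not part of this hunk; the sketch below only illustrates the idea and should not be read as the exact code.

/* Sketch of TLS read emulation (illustration, not taken from this hunk).
 * The trapped instruction is "mrc p15, 0, rN, c13, c0, 3": decode rN
 * from bits 15:12, return the saved tp_value, and skip the instruction. */
static int get_tp_trap_sketch(struct pt_regs *regs, unsigned int instr)
{
        int reg = (instr >> 12) & 15;           /* destination register */

        if (reg == 15)                          /* loading the PC makes no sense */
                return 1;
        regs->uregs[reg] = current_thread_info()->tp_value;
        regs->ARM_pc += 4;                      /* step over the emulated mrc */
        return 0;
}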
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index a39c6a42d68a..ad2d66c93a5c 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -5,6 +5,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <linux/config.h>
+#include <asm/thread_info.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -103,7 +104,7 @@ SECTIONS
__data_loc = ALIGN(4); /* location in binary */
. = DATAADDR;
#else
- . = ALIGN(8192);
+ . = ALIGN(THREAD_SIZE);
__data_loc = .;
#endif