Diffstat (limited to 'arch/arm/kernel')

 arch/arm/kernel/entry-armv.S | 213
 arch/arm/kernel/ptrace.c     |   5
 arch/arm/kernel/sys_arm.c    |  12
 arch/arm/kernel/traps.c      |  58
 4 files changed, 265 insertions(+), 23 deletions(-)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2a5c3fe09a95..080df907f242 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -269,6 +269,12 @@ __pabt_svc:
add r5, sp, #S_PC
ldmia r7, {r2 - r4} @ Get USR pc, cpsr
+#if __LINUX_ARM_ARCH__ < 6
+ @ make sure our user space atomic helper is aborted
+ cmp r2, #VIRT_OFFSET
+ bichs r3, r3, #PSR_Z_BIT
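+	@ (r2 is the interrupted USR pc: if it is at or above VIRT_OFFSET
+	@ the user was executing in the kernel address space, in practice
+	@ inside the user helpers in the vector page, so clear Z in the
+	@ saved CPSR to make the cmpxchg helper's conditional store fail)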
+#endif
+
@
@ We are now ready to fill in the remaining blanks on the stack:
@
@@ -499,8 +505,12 @@ ENTRY(__switch_to)
mra r4, r5, acc0
stmia ip, {r4, r5}
#endif
+#ifdef CONFIG_HAS_TLS_REG
+ mcr p15, 0, r3, c13, c0, 3 @ set TLS register
+#else
mov r4, #0xffff0fff
- str r3, [r4, #-3] @ Set TLS ptr
+ str r3, [r4, #-15] @ TLS val at 0xffff0ff0
+#endif
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#ifdef CONFIG_VFP
@ Always disable VFP so we can lazily save/restore the old
@@ -519,6 +529,207 @@ ENTRY(__switch_to)
ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
__INIT
+
+/*
+ * User helpers.
+ *
+ * These are segments of kernel-provided user code reachable from user space
+ * at a fixed address in kernel memory.  This is used to provide user space
+ * with some operations which require kernel help because the necessary
+ * native features and/or instructions are unimplemented on many ARM CPUs.
+ * The idea is for this code to be executed directly in user mode for best
+ * efficiency while remaining too intimate with its kernel counterpart to
+ * be left to user libraries.  In fact this code might even differ from one
+ * CPU to another depending on the available instruction set, or on
+ * restrictions such as those imposed by SMP systems.  In other words, the
+ * kernel reserves the right to change this code as needed without warning.
+ * Only the entry points and their results are guaranteed to be stable.
+ *
+ * Each segment is 32-byte aligned and will be moved to the top of the high
+ * vector page. New segments (if ever needed) must be added in front of
+ * existing ones. This mechanism should be used only for things that are
+ * really small and justified, and not be abused freely.
+ *
+ * User space is expected to implement these operations inline when
+ * optimizing for a processor that has the necessary native support, but
+ * only if the resulting binaries are already going to be incompatible
+ * with earlier ARM processors due to the use of unsupported instructions
+ * other than those provided here.  In other words, don't make binaries
+ * unable to run on earlier processors just for the sake of avoiding
+ * these kernel helpers if your compiled code is not going to use the new
+ * instructions for any other purpose.
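+ *
+ * With the helpers defined below, the resulting layout at the top of the
+ * vector page is: 0xffff0fc0 __kuser_cmpxchg, 0xffff0fe0 __kuser_get_tls,
+ * 0xffff0ff0 the raw TLS value, and 0xffff0ffc __kuser_helper_version.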
+ */
+
+ .align 5
+ .globl __kuser_helper_start
+__kuser_helper_start:
+
+/*
+ * Reference prototype:
+ *
+ * int __kernel_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * Input:
+ *
+ * r0 = oldval
+ * r1 = newval
+ * r2 = ptr
+ * lr = return address
+ *
+ * Output:
+ *
+ * r0 = returned value (zero or non-zero)
+ * C flag = set if r0 == 0, clear if r0 != 0
+ *
+ * Clobbered:
+ *
+ * r3, ip, flags
+ *
+ * Definition and user space usage example:
+ *
+ * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
+ * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval, on behalf of
+ * user space.  Return zero if *ptr was changed or non-zero if no exchange
+ * happened.  The C flag is also set if *ptr was changed to allow for
+ * assembly optimization in the calling code.
+ *
+ * For example, a user space atomic_add implementation could look like this:
+ *
+ * #define atomic_add(ptr, val) \
+ * ({ register unsigned int *__ptr asm("r2") = (ptr); \
+ * register unsigned int __result asm("r1"); \
+ * asm volatile ( \
+ * "1: @ atomic_add\n\t" \
+ * "ldr r0, [r2]\n\t" \
+ * "mov r3, #0xffff0fff\n\t" \
+ * "add lr, pc, #4\n\t" \
+ * "add r1, r0, %2\n\t" \
+ * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
+ * "bcc 1b" \
+ * : "=&r" (__result) \
+ * : "r" (__ptr), "rIL" (val) \
+ * : "r0","r3","ip","lr","cc","memory" ); \
+ * __result; })
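+ *
+ * A plain C equivalent of the above (a sketch only; it simply retries
+ * until the helper reports success, using the typedef and #define given
+ * earlier):
+ *
+ *	static void atomic_add(int *ptr, int val)
+ *	{
+ *		int old;
+ *		do {
+ *			old = *ptr;
+ *		} while (__kernel_cmpxchg(old, old + val, ptr) != 0);
+ *	}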
+ */
+
+__kuser_cmpxchg: @ 0xffff0fc0
+
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP /* sanity check */
+#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
+#endif
+
+ /*
+ * Theory of operation:
+ *
+	 * We set the Z flag before loading oldval.  If an exception
+	 * occurs we cannot be sure the loaded value will still be the
+	 * same when the exception returns; therefore the user exception
+	 * handler will clear the Z flag whenever the interrupted user
+	 * code was actually executing from the kernel address space
+	 * (see the usr_entry macro).
+	 *
+	 * The writeback on the str (r3 becomes equal to r2 only once
+	 * the store has actually executed) prevents a race with an
+	 * exception happening just after the str instruction, which
+	 * would clear the Z flag although the exchange was done: success
+	 * is detected from r3 via the subs below rather than from Z.
+ */
+ teq ip, ip @ set Z flag
+ ldr ip, [r2] @ load current val
+ add r3, r2, #1 @ prepare store ptr
+ teqeq ip, r0 @ compare with oldval if still allowed
+ streq r1, [r3, #-1]! @ store newval if still allowed
+	subs r0, r2, r3 @ if r2 == r3 the str occurred
+ mov pc, lr
+
+#else
+
+ ldrex r3, [r2]
+ subs r3, r3, r0
+ strexeq r3, r1, [r2]
+ rsbs r0, r3, #0
+ mov pc, lr
+
+#endif
+
+ .align 5
+
+/*
+ * Reference prototype:
+ *
+ * int __kernel_get_tls(void)
+ *
+ * Input:
+ *
+ * lr = return address
+ *
+ * Output:
+ *
+ * r0 = TLS value
+ *
+ * Clobbered:
+ *
+ * the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ * typedef int (__kernel_get_tls_t)(void);
+ * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
+ *
+ * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_get_tls() \
+ * ({ register unsigned int __val asm("r0"); \
+ * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
+ * : "=r" (__val) : : "lr","cc" ); \
+ * __val; })
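+ *
+ * Or simply through the function pointer defined above:
+ *
+ *	int tls = __kernel_get_tls();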
+ */
+
+__kuser_get_tls: @ 0xffff0fe0
+
+#ifndef CONFIG_HAS_TLS_REG
+
+#ifdef CONFIG_SMP /* sanity check */
+#error "CONFIG_SMP without CONFIG_HAS_TLS_REG is wrong"
+#endif
+
+ ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
+ mov pc, lr
+
+#else
+
+ mrc p15, 0, r0, c13, c0, 3 @ read TLS register
+ mov pc, lr
+
+#endif
+
+ .rep 5
+ .word 0 @ pad up to __kuser_helper_version
+ .endr
+
+/*
+ * Reference declaration:
+ *
+ * extern unsigned int __kernel_helper_version;
+ *
+ * Definition and user space usage example:
+ *
+ * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
+ *
+ * User space may read this to determine the current number of helpers
+ * available.
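+ *
+ * For example (a sketch; since new helpers are added in front of older
+ * ones, a helper is present iff the version is at least its ordinal
+ * counting from the top of the page, so __kuser_get_tls is 1 and
+ * __kuser_cmpxchg is 2):
+ *
+ *	int have_kernel_cmpxchg(void)
+ *	{
+ *		return __kernel_helper_version >= 2;
+ *	}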
+ */
+
+__kuser_helper_version: @ 0xffff0ffc
+ .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
+
+ .globl __kuser_helper_end
+__kuser_helper_end:
+
+
/*
* Vector stubs.
*
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index efd7a341614b..cd99b83f14c2 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -19,6 +19,7 @@
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
+#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -693,7 +694,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
case PTRACE_SYSCALL:
case PTRACE_CONT:
ret = -EIO;
- if ((unsigned long) data > _NSIG)
+ if (!valid_signal(data))
break;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
@@ -728,7 +729,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
*/
case PTRACE_SINGLESTEP:
ret = -EIO;
- if ((unsigned long) data > _NSIG)
+ if (!valid_signal(data))
break;
child->ptrace |= PT_SINGLESTEP;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 7ba6342cf93d..ef32577da304 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -227,18 +227,6 @@ asmlinkage int sys_ipc(uint call, int first, int second, int third,
}
}
-asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg,
- unsigned long __user *addr)
-{
- unsigned long ret;
- long err;
-
- err = do_shmat(shmid, shmaddr, shmflg, &ret);
- if (err == 0)
- err = put_user(ret, addr);
- return err;
-}
-
/* Fork a new task - this creates a new program thread.
* This is called indirectly via a small wrapper
*/
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 0078aeb85737..3a001fe5540b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -450,13 +450,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
case NR(set_tls):
thread->tp_value = regs->ARM_r0;
+#ifdef CONFIG_HAS_TLS_REG
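+		/*
+		 * ARMv6 thread register: privileged write here, read-only
+		 * from user mode (the 0xffff0fe0 helper reads it back with
+		 * mrc p15, 0, r0, c13, c0, 3).
+		 */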
+ asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) );
+#else
/*
- * Our user accessible TLS ptr is located at 0xffff0ffc.
- * On SMP read access to this address must raise a fault
- * and be emulated from the data abort handler.
- * m
+ * User space must never try to access this directly.
+ * Expect your app to break eventually if you do so.
+ * The user helper at 0xffff0fe0 must be used instead.
+ * (see entry-armv.S for details)
*/
- *((unsigned long *)0xffff0ffc) = thread->tp_value;
+ *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+#endif
return 0;
default:
@@ -493,6 +497,41 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
return 0;
}
+#if defined(CONFIG_CPU_32v6) && !defined(CONFIG_HAS_TLS_REG)
+
+/*
+ * We might be running on an ARMv6+ processor which should have the TLS
+ * register but for some reason we can't use it directly; user space
+ * reads of that register must therefore be trapped and emulated here.
+
+static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
+{
+ int reg = (instr >> 12) & 15;
+ if (reg == 15)
+ return 1;
+ regs->uregs[reg] = current_thread_info()->tp_value;
+ regs->ARM_pc += 4;
+ return 0;
+}
+
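+/*
+ * instr_mask/instr_val below match "mrc p15, 0, rN, c13, c0, 3" with
+ * any condition code and any destination register; the cpsr fields
+ * restrict the hook to ARM (non-Thumb) state.
+ */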
+static struct undef_hook arm_mrc_hook = {
+ .instr_mask = 0x0fff0fff,
+ .instr_val = 0x0e1d0f70,
+ .cpsr_mask = PSR_T_BIT,
+ .cpsr_val = 0,
+ .fn = get_tp_trap,
+};
+
+static int __init arm_mrc_hook_init(void)
+{
+ register_undef_hook(&arm_mrc_hook);
+ return 0;
+}
+
+late_initcall(arm_mrc_hook_init);
+
+#endif
+
void __bad_xchg(volatile void *ptr, int size)
{
printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
@@ -580,14 +619,17 @@ void __init trap_init(void)
{
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
/*
- * Copy the vectors and stubs (in entry-armv.S) into the
- * vector page, mapped at 0xffff0000, and ensure these are
- * visible to the instruction stream.
+ * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
+ * into the vector page, mapped at 0xffff0000, and ensure these
+ * are visible to the instruction stream.
*/
memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
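+	/*
+	 * With the two helpers currently defined, kuser_sz is 64 bytes,
+	 * so this copy starts at 0xffff0fc0 and the helpers land at the
+	 * fixed addresses documented in entry-armv.S.
+	 */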
+ memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}