Diffstat (limited to 'arch/parisc/include/asm')
 arch/parisc/include/asm/cmpxchg.h     |  9
 arch/parisc/include/asm/eisa_eeprom.h |  4
 arch/parisc/include/asm/ftrace.h      |  2
 arch/parisc/include/asm/futex.h       | 70
 arch/parisc/include/asm/ldcw.h        |  2
 arch/parisc/include/asm/syscall.h     |  9
 arch/parisc/include/asm/thread_info.h |  4
 arch/parisc/include/asm/traps.h       |  2
 arch/parisc/include/asm/uaccess.h     | 91
 9 files changed, 87 insertions(+), 106 deletions(-)
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 0a90b965cccb..7ada30900807 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -52,8 +52,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
- unsigned long old, unsigned long new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
@@ -61,7 +60,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+ case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
@@ -86,7 +85,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
{
switch (size) {
#ifdef CONFIG_64BIT
- case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+ case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
@@ -111,4 +110,6 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
+#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+
#endif /* _ASM_PARISC_CMPXCHG_H_ */
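
The new cmpxchg64() wrapper maps straight onto __cmpxchg_u64(). For illustration, a minimal sketch of the retry loop a caller typically builds on top of it; counter_add64() and its arguments are hypothetical, not part of this patch:

/* Hypothetical caller: atomically add to a 64-bit counter.
 * cmpxchg64(ptr, old, new) stores new only if *ptr still equals old,
 * and returns the value it actually observed at *ptr. */
static inline void counter_add64(volatile u64 *ctr, u64 delta)
{
	u64 old = *ctr, seen;

	for (;;) {
		seen = cmpxchg64(ctr, old, old + delta);
		if (seen == old)	/* swap took effect */
			break;
		old = seen;		/* lost a race, retry with fresh value */
	}
}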
diff --git a/arch/parisc/include/asm/eisa_eeprom.h b/arch/parisc/include/asm/eisa_eeprom.h
index 8ce8b85ca588..5637ac962f8e 100644
--- a/arch/parisc/include/asm/eisa_eeprom.h
+++ b/arch/parisc/include/asm/eisa_eeprom.h
@@ -99,7 +99,7 @@ struct eeprom_eisa_slot_info
#define HPEE_MEMORY_DECODE_24BITS 0x04
#define HPEE_MEMORY_DECODE_32BITS 0x08
/* byte 2 and 3 are a 16bit LE value
- * containging the memory size in kilobytes */
+ * containing the memory size in kilobytes */
/* byte 4,5,6 are a 24bit LE value
* containing the memory base address */
@@ -135,7 +135,7 @@ struct eeprom_eisa_slot_info
#define HPEE_PORT_SHARED 0x40
#define HPEE_PORT_MORE 0x80
/* byte 1 and 2 is a 16bit LE value
- * conating the start port number */
+ * containing the start port number */
#define HPEE_PORT_INIT_MAX_LEN 60 /* in bytes here */
/* port init entry byte 0 */
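
The two comments fixed above describe a packed little-endian layout. As a sketch, hypothetical helpers (not in this header) that decode those fields from a raw slot-info record:

/* bytes 2-3: 16-bit LE memory size in kilobytes */
static unsigned int eeprom_mem_size_kb(const unsigned char *rec)
{
	return rec[2] | (rec[3] << 8);
}

/* bytes 4-6: 24-bit LE memory base address */
static unsigned long eeprom_mem_base(const unsigned char *rec)
{
	return rec[4] | (rec[5] << 8) | ((unsigned long)rec[6] << 16);
}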
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 24cd81d58d70..d635c6b0269d 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -6,6 +6,8 @@ extern void mcount(void);
#define MCOUNT_INSN_SIZE 4
+extern unsigned long sys_call_table[];
+
extern unsigned long return_address(unsigned int);
#define ftrace_return_address(n) return_address(n)
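
Exposing sys_call_table[] here is what lets the generic syscall-tracepoint code translate syscall numbers into entry addresses. The weak default in kernel/trace/trace_syscalls.c amounts to the following, which an arch only overrides when its table stores something other than plain function pointers:

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}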
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 49df14805a9b..ac8bd586ace8 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -35,70 +35,57 @@ static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
unsigned long int flags;
- u32 val;
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
+ int oldval, ret;
+ u32 tmp;
+
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
return -EFAULT;
+ _futex_spin_lock_irqsave(uaddr, &flags);
pagefault_disable();
- _futex_spin_lock_irqsave(uaddr, &flags);
+ ret = -EFAULT;
+ if (unlikely(get_user(oldval, uaddr) != 0))
+ goto out_pagefault_enable;
+
+ ret = 0;
+ tmp = oldval;
switch (op) {
case FUTEX_OP_SET:
- /* *(int *)UADDR2 = OPARG; */
- ret = get_user(oldval, uaddr);
- if (!ret)
- ret = put_user(oparg, uaddr);
+ tmp = oparg;
break;
case FUTEX_OP_ADD:
- /* *(int *)UADDR2 += OPARG; */
- ret = get_user(oldval, uaddr);
- if (!ret) {
- val = oldval + oparg;
- ret = put_user(val, uaddr);
- }
+ tmp += oparg;
break;
case FUTEX_OP_OR:
- /* *(int *)UADDR2 |= OPARG; */
- ret = get_user(oldval, uaddr);
- if (!ret) {
- val = oldval | oparg;
- ret = put_user(val, uaddr);
- }
+ tmp |= oparg;
break;
case FUTEX_OP_ANDN:
- /* *(int *)UADDR2 &= ~OPARG; */
- ret = get_user(oldval, uaddr);
- if (!ret) {
- val = oldval & ~oparg;
- ret = put_user(val, uaddr);
- }
+ tmp &= ~oparg;
break;
case FUTEX_OP_XOR:
- /* *(int *)UADDR2 ^= OPARG; */
- ret = get_user(oldval, uaddr);
- if (!ret) {
- val = oldval ^ oparg;
- ret = put_user(val, uaddr);
- }
+ tmp ^= oparg;
break;
default:
ret = -ENOSYS;
}
- _futex_spin_unlock_irqrestore(uaddr, &flags);
+ if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+ ret = -EFAULT;
+out_pagefault_enable:
pagefault_enable();
+ _futex_spin_unlock_irqrestore(uaddr, &flags);
- if (!ret) {
+ if (ret == 0) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
@@ -112,12 +99,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
-/* Non-atomic version */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
- int ret;
u32 val;
unsigned long flags;
@@ -137,17 +122,20 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
*/
_futex_spin_lock_irqsave(uaddr, &flags);
+ if (unlikely(get_user(val, uaddr) != 0)) {
+ _futex_spin_unlock_irqrestore(uaddr, &flags);
+ return -EFAULT;
+ }
- ret = get_user(val, uaddr);
-
- if (!ret && val == oldval)
- ret = put_user(newval, uaddr);
+ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+ _futex_spin_unlock_irqrestore(uaddr, &flags);
+ return -EFAULT;
+ }
*uval = val;
-
_futex_spin_unlock_irqrestore(uaddr, &flags);
- return ret;
+ return 0;
}
#endif /*__KERNEL__*/
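
The unpacking at the top of futex_atomic_op_inuser() leans on arithmetic shifts to sign-extend the two 12-bit operand fields. The same decode, sketched standalone; the struct and function are illustrative only:

#include <linux/futex.h>	/* FUTEX_OP_OPARG_SHIFT */

struct futex_op_fields {
	int op, cmp, oparg, cmparg;
};

/* Mirror of the decode above: op and cmp are 4-bit fields; oparg and
 * cmparg are 12-bit fields recovered with a left-then-right shift
 * pair so that negative operands come back sign-extended. */
static struct futex_op_fields futex_decode(int encoded_op)
{
	struct futex_op_fields f;

	f.op     = (encoded_op >> 28) & 7;
	f.cmp    = (encoded_op >> 24) & 15;
	f.oparg  = (encoded_op << 8) >> 20;	/* bits 12..23 */
	f.cmparg = (encoded_op << 20) >> 20;	/* bits 0..11 */

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		f.oparg = 1 << f.oparg;		/* operand is a shift count */
	return f;
}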
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index 8121aa6db2ff..8be707e1b6c7 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -40,7 +40,7 @@
memory to indicate to the compiler that the assembly code reads
or writes to items other than those listed in the input and output
operands. This may pessimize the code somewhat but __ldcw is
- usually used within code blocks surrounded by memory barriors. */
+ usually used within code blocks surrounded by memory barriers. */
#define __ldcw(a) ({ \
unsigned __ret; \
__asm__ __volatile__(__LDCW " 0(%1),%0" \
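
__ldcw() is the parisc load-and-clear primitive behind the kernel's spinlocks: it atomically reads a word and zeroes it, so a non-zero return means the caller now owns the lock. A sketch of the classic acquire/release pair, in the spirit of the parisc spinlock code; lock-word alignment (handled elsewhere by __ldcw_align()) is glossed over:

static inline void ldcw_lock(volatile unsigned int *a)
{
	while (__ldcw(a) == 0)		/* atomically grab-and-clear */
		while (*a == 0)		/* spin read-only until it looks free */
			cpu_relax();
}

static inline void ldcw_unlock(volatile unsigned int *a)
{
	*a = 1;				/* any non-zero value means unlocked */
}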
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index 637ce8d6f375..5e0b4e6bd99d 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -8,6 +8,8 @@
#include <linux/err.h>
#include <asm/ptrace.h>
+#define NR_syscalls (__NR_Linux_syscalls)
+
static inline long syscall_get_nr(struct task_struct *tsk,
struct pt_regs *regs)
{
@@ -33,12 +35,19 @@ static inline void syscall_get_arguments(struct task_struct *tsk,
args[1] = regs->gr[25];
case 1:
args[0] = regs->gr[26];
+ case 0:
break;
default:
BUG();
}
}
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->gr[28];
+}
+
static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
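
The added case 0: gives a zero-argument request a legal way out of the deliberately falling-through switch, which previously dropped into BUG(). The shape of that dispatch, sketched standalone and extrapolating the gr[26 - i] argument pattern visible in the hunk:

/* Entering at case n copies arguments n-1 down to 0; a request for
 * zero arguments now copies nothing instead of hitting BUG(). */
static void get_top_args(unsigned long *args, const unsigned long *gr,
			 unsigned int n)
{
	switch (n) {
	case 4:
		args[3] = gr[23];	/* fall through */
	case 3:
		args[2] = gr[24];	/* fall through */
	case 2:
		args[1] = gr[25];	/* fall through */
	case 1:
		args[0] = gr[26];	/* fall through */
	case 0:
		break;
	}
}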
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index e96e693fd58c..7581330ea35b 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -55,6 +55,7 @@ struct thread_info {
#define TIF_SINGLESTEP 9 /* single stepping? */
#define TIF_BLOCKSTEP 10 /* branch stepping? */
#define TIF_SECCOMP 11 /* secure computing */
+#define TIF_SYSCALL_TRACEPOINT 12 /* syscall tracepoint instrumentation */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -66,12 +67,13 @@ struct thread_info {
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
_TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
_TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \
- _TIF_SECCOMP)
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
#ifdef CONFIG_64BIT
# ifdef CONFIG_COMPAT
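
TIF_SYSCALL_TRACEPOINT joins _TIF_SYSCALL_TRACE_MASK so that arming a syscall tracepoint forces the slow syscall entry path. A sketch of how such a bit is typically consumed there, using the generic thread-flag accessors; the function name is hypothetical:

/* Slow-path sketch: one cheap test of the combined mask, then
 * per-feature TIF checks to decide which tracing work to do. */
static void syscall_trace_enter_sketch(void)
{
	unsigned long work = current_thread_info()->flags;

	if (!(work & _TIF_SYSCALL_TRACE_MASK))
		return;			/* nothing armed: fast path */

	if (work & _TIF_SYSCALL_TRACEPOINT) {
		/* emit the sys_enter tracepoint here */
	}
	if (work & _TIF_SYSCALL_TRACE) {
		/* notify a ptrace tracer here */
	}
}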
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 4736020ba5ea..5e953ab4530d 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -8,6 +8,8 @@ struct pt_regs;
void parisc_terminate(char *msg, struct pt_regs *regs,
int code, unsigned long offset) __noreturn __cold;
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
/* mm/fault.c */
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address);
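
die_if_kernel() is the classic parisc fault-path helper: conventionally a no-op for user-mode faults and fatal (register dump, then death of the kernel context) otherwise. Typical call shape, with a hypothetical message string:

/* In a trap handler: only lethal if the fault came from kernel mode. */
die_if_kernel("unexpected data access fault", regs, code);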
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 7955e43f3f3f..0f59fd9ca205 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -40,14 +40,10 @@ static inline long access_ok(int type, const void __user * addr,
#define get_user __get_user
#if !defined(CONFIG_64BIT)
-#define LDD_KERNEL(ptr) BUILD_BUG()
-#define LDD_USER(ptr) BUILD_BUG()
-#define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr)
+#define LDD_USER(ptr) __get_user_asm64(ptr)
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
#else
-#define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr)
#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
-#define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr)
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
#endif
@@ -80,70 +76,70 @@ struct exception_data {
unsigned long fault_addr;
};
+/*
+ * load_sr2() preloads the space register %%sr2 - based on the value of
+ * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
+ * is 0), or with the current value of %%sr3 to access user space (USER_DS)
+ * memory. The following __get_user_asm() and __put_user_asm() functions have
+ * %%sr2 hard-coded to access the requested memory.
+ */
+#define load_sr2() \
+ __asm__(" or,= %0,%%r0,%%r0\n\t" \
+ " mfsp %%sr3,%0\n\t" \
+ " mtsp %0,%%sr2\n\t" \
+ : : "r"(get_fs()) : )
+
#define __get_user(x, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
register long __gu_val __asm__ ("r9") = 0; \
\
- if (segment_eq(get_fs(), KERNEL_DS)) { \
- switch (sizeof(*(ptr))) { \
- case 1: __get_kernel_asm("ldb", ptr); break; \
- case 2: __get_kernel_asm("ldh", ptr); break; \
- case 4: __get_kernel_asm("ldw", ptr); break; \
- case 8: LDD_KERNEL(ptr); break; \
- default: BUILD_BUG(); break; \
- } \
- } \
- else { \
- switch (sizeof(*(ptr))) { \
+ load_sr2(); \
+ switch (sizeof(*(ptr))) { \
case 1: __get_user_asm("ldb", ptr); break; \
case 2: __get_user_asm("ldh", ptr); break; \
case 4: __get_user_asm("ldw", ptr); break; \
case 8: LDD_USER(ptr); break; \
default: BUILD_BUG(); break; \
- } \
} \
\
(x) = (__force __typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
-#define __get_kernel_asm(ldx, ptr) \
- __asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \
+#define __get_user_asm(ldx, ptr) \
+ __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");
-#define __get_user_asm(ldx, ptr) \
- __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
- : "=r"(__gu_val), "=r"(__gu_err) \
+#if !defined(CONFIG_64BIT)
+
+#define __get_user_asm64(ptr) \
+ __asm__("\n1:\tldw 0(%%sr2,%2),%0" \
+ "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
+ ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+ : "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");
+#endif /* !defined(CONFIG_64BIT) */
+
+
#define __put_user(x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
- if (segment_eq(get_fs(), KERNEL_DS)) { \
- switch (sizeof(*(ptr))) { \
- case 1: __put_kernel_asm("stb", __x, ptr); break; \
- case 2: __put_kernel_asm("sth", __x, ptr); break; \
- case 4: __put_kernel_asm("stw", __x, ptr); break; \
- case 8: STD_KERNEL(__x, ptr); break; \
- default: BUILD_BUG(); break; \
- } \
- } \
- else { \
- switch (sizeof(*(ptr))) { \
+ load_sr2(); \
+ switch (sizeof(*(ptr))) { \
case 1: __put_user_asm("stb", __x, ptr); break; \
case 2: __put_user_asm("sth", __x, ptr); break; \
case 4: __put_user_asm("stw", __x, ptr); break; \
case 8: STD_USER(__x, ptr); break; \
default: BUILD_BUG(); break; \
- } \
} \
\
__pu_err; \
@@ -159,17 +155,9 @@ struct exception_data {
* r8/r9 are already listed as err/val.
*/
-#define __put_kernel_asm(stx, x, ptr) \
- __asm__ __volatile__ ( \
- "\n1:\t" stx "\t%2,0(%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err) \
- : "r1")
-
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
- "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \
+ "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
@@ -178,21 +166,10 @@ struct exception_data {
#if !defined(CONFIG_64BIT)
-#define __put_kernel_asm64(__val, ptr) do { \
- __asm__ __volatile__ ( \
- "\n1:\tstw %2,0(%1)" \
- "\n2:\tstw %R2,4(%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err) \
- : "r1"); \
-} while (0)
-
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
- "\n1:\tstw %2,0(%%sr3,%1)" \
- "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \
+ "\n1:\tstw %2,0(%%sr2,%1)" \
+ "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
: "=r"(__pu_err) \