Diffstat (limited to 'arch/parisc/include/asm')
-rw-r--r--  arch/parisc/include/asm/assembly.h      | 10
-rw-r--r--  arch/parisc/include/asm/futex.h         | 59
-rw-r--r--  arch/parisc/include/asm/io.h            |  6
-rw-r--r--  arch/parisc/include/asm/special_insns.h | 44
-rw-r--r--  arch/parisc/include/asm/uaccess.h       | 12
5 files changed, 82 insertions(+), 49 deletions(-)
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 6d13ae236fcb..6369082c6c74 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -158,6 +158,16 @@
#endif
.endm
+ /* The depi instruction leaves the most significant 32 bits of the
+ * target register in an undefined state on PA 2.0 systems. */
+ .macro depi_safe i, p, len, t
+#ifdef CONFIG_64BIT
+ depdi \i, 32+(\p), \len, \t
+#else
+ depi \i, \p, \len, \t
+#endif
+ .endm
+
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
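PA-RISC numbers bit positions from the most significant bit, so a field that
ends at position p of a 32-bit word ends at position 32+p of a 64-bit
register; that is why depi_safe rewrites the position as 32+(\p) under
CONFIG_64BIT. A minimal, stand-alone C model of the deposit arithmetic (the
deposit() helper is an illustration of the instruction semantics, not kernel
code):

	#include <stdint.h>
	#include <stdio.h>

	/* Deposit the low 'len' bits of 'i' into 't' so the field ends at
	 * MSB-relative bit position 'p' of an 'nbits'-wide register. */
	static uint64_t deposit(uint64_t t, uint64_t i, int p, int len, int nbits)
	{
		int shift = nbits - 1 - p;	/* LSB-relative start of field */
		uint64_t mask = (((uint64_t)1 << len) - 1) << shift;

		return (t & ~mask) | ((i << shift) & mask);
	}

	int main(void)
	{
		/* depi 7,31,3: field ends at bit 31, the LSB of a 32-bit word */
		printf("32-bit: %#llx\n", (unsigned long long)deposit(0, 7, 31, 3, 32));
		/* depdi 7,32+31,3: the same low-order bits of a 64-bit register */
		printf("64-bit: %#llx\n", (unsigned long long)deposit(0, 7, 32 + 31, 3, 64));
		return 0;
	}

Both calls print 0x7: shifting the position by 32 keeps the field in the same
low-order bits while the register width doubles.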
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 9cd4dd6e63ad..b5835325d44b 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -8,39 +8,47 @@
#include <asm/errno.h>
/* The following has to match the LWS code in syscall.S. We have
- sixteen four-word locks. */
+ * 256 four-word locks. We use bits 20-27 of the futex virtual
+ * address for the hash index.
+ */
+
+static inline unsigned long _futex_hash_index(unsigned long ua)
+{
+ return (ua >> 2) & 0x3fc;
+}
static inline void
-_futex_spin_lock(u32 __user *uaddr)
+_futex_spin_lock_irqsave(arch_spinlock_t *s, unsigned long *flags)
{
- extern u32 lws_lock_start[];
- long index = ((long)uaddr & 0x7f8) >> 1;
- arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
- preempt_disable();
+ local_irq_save(*flags);
arch_spin_lock(s);
}
static inline void
-_futex_spin_unlock(u32 __user *uaddr)
+_futex_spin_unlock_irqrestore(arch_spinlock_t *s, unsigned long *flags)
{
- extern u32 lws_lock_start[];
- long index = ((long)uaddr & 0x7f8) >> 1;
- arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
arch_spin_unlock(s);
- preempt_enable();
+ local_irq_restore(*flags);
}
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
+ extern u32 lws_lock_start[];
+ unsigned long ua = (unsigned long)uaddr;
+ arch_spinlock_t *s;
+ unsigned long flags;
int oldval, ret;
u32 tmp;
- ret = -EFAULT;
+ s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
+ _futex_spin_lock_irqsave(s, &flags);
- _futex_spin_lock(uaddr);
- if (unlikely(get_user(oldval, uaddr) != 0))
+ /* Return -EFAULT if we encounter a page fault or COW break */
+ if (unlikely(get_user(oldval, uaddr) != 0)) {
+ ret = -EFAULT;
goto out_pagefault_enable;
+ }
ret = 0;
tmp = oldval;
@@ -63,13 +71,14 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
break;
default:
ret = -ENOSYS;
+ goto out_pagefault_enable;
}
- if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
+ if (unlikely(put_user(tmp, uaddr) != 0))
ret = -EFAULT;
out_pagefault_enable:
- _futex_spin_unlock(uaddr);
+ _futex_spin_unlock_irqrestore(s, &flags);
if (!ret)
*oval = oldval;
@@ -81,7 +90,11 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ extern u32 lws_lock_start[];
+ unsigned long ua = (unsigned long)uaddr;
+ arch_spinlock_t *s;
u32 val;
+ unsigned long flags;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@@ -94,23 +107,25 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
/* HPPA has no cmpxchg in hardware and therefore the
* best we can do here is use an array of locks. The
- * lock selected is based on a hash of the userspace
- * address. This should scale to a couple of CPUs.
+ * lock selected is based on a hash of the virtual
+ * address of the futex. This should scale to a couple
+ * of CPUs.
*/
- _futex_spin_lock(uaddr);
+ s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
+ _futex_spin_lock_irqsave(s, &flags);
if (unlikely(get_user(val, uaddr) != 0)) {
- _futex_spin_unlock(uaddr);
+ _futex_spin_unlock_irqrestore(s, &flags);
return -EFAULT;
}
if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
- _futex_spin_unlock(uaddr);
+ _futex_spin_unlock_irqrestore(s, &flags);
return -EFAULT;
}
*uval = val;
- _futex_spin_unlock(uaddr);
+ _futex_spin_unlock_irqrestore(s, &flags);
return 0;
}
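The new hash takes bits 20-27 of the futex address in PA-RISC's MSB-first
numbering, i.e. bits 4-11 counting from the least significant end, so all
futexes within one 16-byte granule share a lock. Because the mask 0x3fc
clears the low two bits, the index steps through lws_lock_start[] in units
of four u32 words, matching the four-word locks, and 256 distinct indices
are possible. A small sketch of the arithmetic (the hash function is copied
from the hunk above; main() and the sample addresses are invented for
illustration):

	#include <stdio.h>

	static unsigned long _futex_hash_index(unsigned long ua)
	{
		return (ua >> 2) & 0x3fc;
	}

	int main(void)
	{
		unsigned long a = 0xfaf01230UL;	/* arbitrary example address */
		unsigned long b = 0xfaf0123cUL;	/* same 16-byte granule as a */
		unsigned long c = 0xfaf01240UL;	/* start of the next granule */

		/* prints "140 140 144": a and b share lws_lock_start[140]
		 * (lock number 35), c hashes to the next four-word lock */
		printf("%lu %lu %lu\n", _futex_hash_index(a),
		       _futex_hash_index(b), _futex_hash_index(c));
		return 0;
	}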
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 0b5259102319..837ddddbac6a 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -273,9 +273,9 @@ static inline int inl(unsigned long addr)
return -1;
}
-#define outb(x, y) BUG()
-#define outw(x, y) BUG()
-#define outl(x, y) BUG()
+#define outb(x, y) ({(void)(x); (void)(y); BUG(); 0;})
+#define outw(x, y) ({(void)(x); (void)(y); BUG(); 0;})
+#define outl(x, y) ({(void)(x); (void)(y); BUG(); 0;})
#endif
/*
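The old object-like expansions discarded their arguments entirely and could
not appear where an expression is expected; the statement-expression form
evaluates both arguments (which, presumably, silences unused-variable
warnings in callers) and yields 0 as a value. A user-space sketch of the
pattern, assuming GNU C as the kernel does, with a stand-in BUG():

	#include <stdlib.h>

	#define BUG() abort()	/* stand-in for the kernel macro */

	#define outb_old(x, y) BUG()
	#define outb_new(x, y) ({ (void)(x); (void)(y); BUG(); 0; })

	int main(void)
	{
		unsigned char val = 0x12;
		unsigned long port = 0x378;

		/* val and port count as used, so no warning; the program
		 * then aborts, just as the kernel macro would BUG() */
		outb_new(val, port);
		return 0;	/* not reached */
	}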
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index a303ae9a77f4..16ee41e77174 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -2,28 +2,32 @@
#ifndef __PARISC_SPECIAL_INSNS_H
#define __PARISC_SPECIAL_INSNS_H
-#define lpa(va) ({ \
- unsigned long pa; \
- __asm__ __volatile__( \
- "copy %%r0,%0\n\t" \
- "lpa %%r0(%1),%0" \
- : "=r" (pa) \
- : "r" (va) \
- : "memory" \
- ); \
- pa; \
+#define lpa(va) ({ \
+ unsigned long pa; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n" \
+ "8:\tlpa %%r0(%1),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+ : "=&r" (pa) \
+ : "r" (va) \
+ : "memory" \
+ ); \
+ pa; \
})
-#define lpa_user(va) ({ \
- unsigned long pa; \
- __asm__ __volatile__( \
- "copy %%r0,%0\n\t" \
- "lpa %%r0(%%sr3,%1),%0" \
- : "=r" (pa) \
- : "r" (va) \
- : "memory" \
- ); \
- pa; \
+#define lpa_user(va) ({ \
+ unsigned long pa; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n" \
+ "8:\tlpa %%r0(%%sr3,%1),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
+ : "=&r" (pa) \
+ : "r" (va) \
+ : "memory" \
+ ); \
+ pa; \
})
#define mfctl(reg) ({ \
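The rewritten lpa()/lpa_user() first clear the result register, and the
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) lets the fault handler resume at label 9
when no translation exists, so the macro yields 0 instead of an unhandled
kernel fault. The "=&r" early-clobber is also needed now: %0 is written by
the copy before %1 is consumed, so the compiler must not put them in the
same register. A hypothetical C model of that control flow (lpa_model() and
its parameters are inventions for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long lpa_model(bool translation_exists, unsigned long phys)
	{
		unsigned long pa = 0;		/* copy %%r0,%0 */

		if (translation_exists)		/* 8: lpa succeeds */
			pa = phys;
		/* on a fault, the fixup lands at 9: with pa still 0 */
		return pa;			/* 9: */
	}

	int main(void)
	{
		printf("%#lx\n", lpa_model(true, 0x1234000UL));		/* 0x1234000 */
		printf("%#lx\n", lpa_model(false, 0x1234000UL));	/* 0 */
		return 0;
	}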
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 192ad9e11b25..ebf8a845b017 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -53,15 +53,18 @@ struct exception_table_entry {
/*
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
* (with lowest bit set) for which the fault handler in fixup_exception() will
- * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * load -EFAULT into %r29 for a read or write fault, and zeroes the target
* register in case of a read fault in get_user().
*/
+#define ASM_EXCEPTIONTABLE_REG 29
+#define ASM_EXCEPTIONTABLE_VAR(__variable) \
+ register long __variable __asm__ ("r29") = 0
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
#define __get_user_internal(sr, val, ptr) \
({ \
- register long __gu_err __asm__ ("r8") = 0; \
+ ASM_EXCEPTIONTABLE_VAR(__gu_err); \
\
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
@@ -131,7 +134,7 @@ struct exception_table_entry {
#define __put_user_internal(sr, x, ptr) \
({ \
- register long __pu_err __asm__ ("r8") = 0; \
+ ASM_EXCEPTIONTABLE_VAR(__pu_err); \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
@@ -168,7 +171,8 @@ struct exception_table_entry {
* gcc knows about, so there are no aliasing issues. These macros must
* also be aware that fixups are executed in the context of the fault,
* and any registers used there must be listed as clobbers.
- * r8 is already listed as err.
+ * The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG)
+ * is already listed as an input and an output register.
*/
#define __put_user_asm(sr, stx, x, ptr) \
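Callers are unaffected by the move from %r8 to %r29: get_user()/put_user()
still return 0 or -EFAULT, only the register that carries the error out of
the fixup handler changes. A hedged caller-side sketch (read_u32_from_user()
is a hypothetical helper, not part of this patch):

	#include <linux/uaccess.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static int read_u32_from_user(u32 __user *p, u32 *out)
	{
		u32 v;

		/* get_user() expands through __get_user_internal(); on a
		 * fault the fixup deposits -EFAULT in %r29 and zeroes v */
		if (get_user(v, p))
			return -EFAULT;
		*out = v;
		return 0;
	}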