author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-06-11 11:41:00 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-06-11 11:41:00 -0700
commit     22353f35c895acb7a8ca27ebdc6397c993b4213e (patch)
tree       b1b52ca357ac2e5bb1428dea948bd4d175e40e07 /include
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6 (diff)
parent     [MIPS] Fix smp barriers in test_and_{change,clear,set}_bit (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Fix smp barriers in test_and_{change,clear,set}_bit
  [MIPS] Fix IP27 build
  [MIPS] Fix modpost warnings by making start_secondary __cpuinit
  [MIPS] SMTC: Fix build error caused by nonsense code.
  [MIPS] SMTC: The MT ASE requires to initialize c0_pagemask and c0_wired.
  [MIPS] SMTC: Don't continue in set_vi_srs_handler on detected bad arguments.
  [MIPS] SMTC: Fix warning.
  [MIPS] Wire up utimensat, signalfd, timerfd, eventfd
  [MIPS] Atlas: Fix build.
  [MIPS] Always install the DSP exception handler.
  [MIPS] SMTC: Don't set and restore irqregs ptr from self_ipi.
  [MIPS] Fix KMODE for the R3000
Diffstat (limited to 'include')
-rw-r--r--  include/asm-mips/bitops.h      51
-rw-r--r--  include/asm-mips/stackframe.h  52
-rw-r--r--  include/asm-mips/unistd.h      24
3 files changed, 65 insertions(+), 62 deletions(-)
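
The first item in the merge above, the SMP barrier fix, is what the bitops.h hunks below implement: each test_and_* helper used to return from inside its per-branch code, so only some code paths ever reached the trailing smp_mb(); the patch collects the result into a shared `res` and lets every branch fall through to the barrier. A minimal, userspace-compilable sketch of the resulting shape (the boolean flag, the plain-C branch bodies and the C11 fence are stand-ins for the kernel's cpu_has_llsc test, the LL/SC assembly and smp_mb(); this is an illustration, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

#define BITS_PER_LONG_SKETCH (8 * sizeof(unsigned long))

static bool cpu_has_llsc_sketch = true;      /* stand-in for cpu_has_llsc */

static inline int test_and_set_bit_sketch(unsigned long nr,
                                          volatile unsigned long *addr)
{
        unsigned long mask = 1UL << (nr % BITS_PER_LONG_SKETCH);
        volatile unsigned long *word = addr + nr / BITS_PER_LONG_SKETCH;
        unsigned long res;

        if (cpu_has_llsc_sketch) {
                /* kernel: LL/SC retry loop that leaves the old word in res */
                res = *word & mask;
                *word |= mask;
        } else {
                /* kernel: same update inside raw_local_irq_save()/restore() */
                res = *word & mask;
                *word |= mask;
        }

        atomic_thread_fence(memory_order_seq_cst);   /* smp_mb(): now on every path */

        return res != 0;
}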
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index d995413e11fd..ffe245b4258f 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -238,10 +238,11 @@ static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
+ unsigned long res;
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
+ unsigned long temp;
__asm__ __volatile__(
" .set mips3 \n"
@@ -254,11 +255,9 @@ static inline int test_and_set_bit(unsigned long nr,
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
-
- return res != 0;
} else if (cpu_has_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
+ unsigned long temp;
__asm__ __volatile__(
" .set push \n"
@@ -277,25 +276,22 @@ static inline int test_and_set_bit(unsigned long nr,
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
-
- return res != 0;
} else {
volatile unsigned long *a = addr;
unsigned long mask;
- int retval;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- retval = (mask & *a) != 0;
+ res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
-
- return retval;
}
smp_mb();
+
+ return res != 0;
}
/*
@@ -310,6 +306,7 @@ static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
+ unsigned long res;
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -327,12 +324,10 @@ static inline int test_and_clear_bit(unsigned long nr,
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
-
- return res != 0;
#ifdef CONFIG_CPU_MIPSR2
} else if (__builtin_constant_p(nr)) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
+ unsigned long temp;
__asm__ __volatile__(
"1: " __LL "%0, %1 # test_and_clear_bit \n"
@@ -346,12 +341,10 @@ static inline int test_and_clear_bit(unsigned long nr,
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "ri" (bit), "m" (*m)
: "memory");
-
- return res;
#endif
} else if (cpu_has_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
+ unsigned long temp;
__asm__ __volatile__(
" .set push \n"
@@ -371,25 +364,22 @@ static inline int test_and_clear_bit(unsigned long nr,
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
-
- return res != 0;
} else {
volatile unsigned long *a = addr;
unsigned long mask;
- int retval;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- retval = (mask & *a) != 0;
+ res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
-
- return retval;
}
smp_mb();
+
+ return res != 0;
}
/*
@@ -404,10 +394,11 @@ static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
+ unsigned long res;
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
+ unsigned long temp;
__asm__ __volatile__(
" .set mips3 \n"
@@ -420,11 +411,9 @@ static inline int test_and_change_bit(unsigned long nr,
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
-
- return res != 0;
} else if (cpu_has_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp, res;
+ unsigned long temp;
__asm__ __volatile__(
" .set push \n"
@@ -443,24 +432,22 @@ static inline int test_and_change_bit(unsigned long nr,
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
-
- return res != 0;
} else {
volatile unsigned long *a = addr;
- unsigned long mask, retval;
+ unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- retval = (mask & *a) != 0;
+ res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
-
- return retval;
}
smp_mb();
+
+ return res != 0;
}
#include <asm-generic/bitops/non-atomic.h>
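
A small point visible in the fallback branches above: `res` now holds the raw masked word (`mask & *a`) rather than a pre-normalized 0/1, and the shared `return res != 0` at the bottom of each function is what folds it back to a boolean, so the fallback path still returns the same value as before. A quick check of that equivalence (plain C, independent of the kernel):

#include <assert.h>

int main(void)
{
        unsigned long word = 0x10;              /* bit 4 already set */
        unsigned long mask = 1UL << 4;

        int old_style = (mask & word) != 0;     /* pre-patch: normalize at the test */
        unsigned long res = mask & word;        /* post-patch: raw value, 0x10 here */

        assert(old_style == (res != 0));        /* same answer either way */
        return 0;
}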
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 7afa1fdf70ca..ed33366b85b8 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -17,6 +17,18 @@
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
+/*
+ * For SMTC kernel, global IE should be left set, and interrupts
+ * controlled exclusively via IXMT.
+ */
+#ifdef CONFIG_MIPS_MT_SMTC
+#define STATMASK 0x1e
+#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#define STATMASK 0x3f
+#else
+#define STATMASK 0x1f
+#endif
+
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
@@ -236,10 +248,10 @@
.set reorder
.set noat
mfc0 a0, CP0_STATUS
- ori a0, 0x1f
- xori a0, 0x1f
- mtc0 a0, CP0_STATUS
li v1, 0xff00
+ ori a0, STATMASK
+ xori a0, STATMASK
+ mtc0 a0, CP0_STATUS
and a0, v1
LONG_L v0, PT_STATUS(sp)
nor v1, $0, v1
@@ -249,10 +261,6 @@
LONG_L $31, PT_R31(sp)
LONG_L $28, PT_R28(sp)
LONG_L $25, PT_R25(sp)
-#ifdef CONFIG_64BIT
- LONG_L $8, PT_R8(sp)
- LONG_L $9, PT_R9(sp)
-#endif
LONG_L $7, PT_R7(sp)
LONG_L $6, PT_R6(sp)
LONG_L $5, PT_R5(sp)
@@ -273,16 +281,6 @@
.endm
#else
-/*
- * For SMTC kernel, global IE should be left set, and interrupts
- * controlled exclusively via IXMT.
- */
-
-#ifdef CONFIG_MIPS_MT_SMTC
-#define STATMASK 0x1e
-#else
-#define STATMASK 0x1f
-#endif
.macro RESTORE_SOME
.set push
.set reorder
@@ -385,9 +383,9 @@
.macro CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS
- li t1, ST0_CU0 | 0x1f
+ li t1, ST0_CU0 | STATMASK
or t0, t1
- xori t0, 0x1f
+ xori t0, STATMASK
mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
/*
@@ -420,9 +418,9 @@
.macro STI
#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS
- li t1, ST0_CU0 | 0x1f
+ li t1, ST0_CU0 | STATMASK
or t0, t1
- xori t0, 0x1e
+ xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
/*
@@ -451,7 +449,8 @@
.endm
/*
- * Just move to kernel mode and leave interrupts as they are.
+ * Just move to kernel mode and leave interrupts as they are. Note
+ * for the R3000 this means copying the previous enable from IEp.
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro KMODE
@@ -482,9 +481,14 @@
move ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t0, CP0_STATUS
- li t1, ST0_CU0 | 0x1e
+ li t1, ST0_CU0 | (STATMASK & ~1)
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ andi t2, t0, ST0_IEP
+ srl t2, 2
+ or t0, t2
+#endif
or t0, t1
- xori t0, 0x1e
+ xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
_ehb
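
The recurring ori/xori pair in these stackframe.h hunks is the idiom the STATMASK change parameterizes: OR-ing Status with a mask and then XOR-ing with the same mask clears exactly those bits. CLI uses the full STATMASK on both sides; STI ORs in the full mask but XORs only STATMASK & ~1, so bit 0 (IE) ends up set and interrupts come back enabled; KMODE uses STATMASK & ~1 on both sides and so leaves IE as it was. A small C check of that bit arithmetic (the status value is arbitrary; nothing here is kernel code):

#include <assert.h>

int main(void)
{
        unsigned int status   = 0x1000ff13u;    /* arbitrary CP0_STATUS-like value */
        unsigned int statmask = 0x1fu;          /* default STATMASK in the patch */

        /* ori t0, STATMASK ; xori t0, STATMASK  ==  clear the STATMASK bits */
        unsigned int cli = (status | statmask) ^ statmask;
        assert(cli == (status & ~statmask));

        /* OR with STATMASK, XOR with STATMASK & ~1: bits 1..4 cleared, IE set */
        unsigned int sti = (status | statmask) ^ (statmask & ~1u);
        assert((sti & statmask) == 1u);          /* only IE remains of those bits */
        return 0;
}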
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index 91c306fcfb72..59d6fec8fbff 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -336,16 +336,20 @@
#define __NR_epoll_pwait (__NR_Linux + 313)
#define __NR_ioprio_set (__NR_Linux + 314)
#define __NR_ioprio_get (__NR_Linux + 315)
+#define __NR_utimensat (__NR_Linux + 316)
+#define __NR_signalfd (__NR_Linux + 317)
+#define __NR_timerfd (__NR_Linux + 318)
+#define __NR_eventfd (__NR_Linux + 319)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 315
+#define __NR_Linux_syscalls 319
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 315
+#define __NR_O32_Linux_syscalls 319
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -628,16 +632,20 @@
#define __NR_epoll_pwait (__NR_Linux + 272)
#define __NR_ioprio_set (__NR_Linux + 273)
#define __NR_ioprio_get (__NR_Linux + 274)
+#define __NR_utimensat (__NR_Linux + 275)
+#define __NR_signalfd (__NR_Linux + 276)
+#define __NR_timerfd (__NR_Linux + 277)
+#define __NR_eventfd (__NR_Linux + 278)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 274
+#define __NR_Linux_syscalls 278
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 274
+#define __NR_64_Linux_syscalls 278
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -924,16 +932,20 @@
#define __NR_epoll_pwait (__NR_Linux + 276)
#define __NR_ioprio_set (__NR_Linux + 277)
#define __NR_ioprio_get (__NR_Linux + 278)
+#define __NR_utimensat (__NR_Linux + 279)
+#define __NR_signalfd (__NR_Linux + 280)
+#define __NR_timerfd (__NR_Linux + 281)
+#define __NR_eventfd (__NR_Linux + 282)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 278
+#define __NR_Linux_syscalls 282
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 278
+#define __NR_N32_Linux_syscalls 282
#ifdef __KERNEL__
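
For reference, the new entries resolve against each ABI's base shown in the unchanged context above (__NR_O32_Linux 4000, __NR_64_Linux 5000, __NR_N32_Linux 6000), so __NR_eventfd, for example, becomes 4319 on o32, 5278 on 64-bit and 6282 on N32, and the four added syscalls are what bump each __NR_Linux_syscalls count by four. A trivial arithmetic check (plain C):

#include <assert.h>

int main(void)
{
        /* four new entries per ABI push the "last syscall" offsets accordingly */
        assert(315 + 4 == 319);   /* o32 __NR_Linux_syscalls */
        assert(274 + 4 == 278);   /* 64  __NR_Linux_syscalls */
        assert(278 + 4 == 282);   /* N32 __NR_Linux_syscalls */

        /* absolute numbers: ABI base + offset, e.g. __NR_eventfd */
        assert(4000 + 319 == 4319 && 5000 + 278 == 5278 && 6000 + 282 == 6282);
        return 0;
}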