author    Ralf Baechle <ralf@linux-mips.org>    2005-12-07 18:57:52 +0000
committer <ralf@denk.linux-mips.net>            2006-01-10 13:39:06 +0000
commit    b2d28b7ea57edb4dee34a70fcd89083134017d4d
tree      142b26e9468f49f37117b194d1f1dcaa2d8abf73  /include/asm-mips/atomic.h
parent    MIPS: FP: Remove silly trick to avoid warning.
MIPS: Get rid of atomic_lock.
It was resulting in build errors for some configurations.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
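The patch replaces the global atomic_lock spinlock in the non-LL/SC fallback paths with plain local interrupt disabling. A minimal sketch of the resulting fallback pattern follows; the function name atomic_add_fallback is illustrative, not the exact header contents, and the safety argument assumes (as the patch itself does not spell out) that CPUs lacking LL/SC only run uniprocessor kernels, so masking interrupts on the local CPU is enough to make the read-modify-write atomic.

/*
 * Sketch of the non-LL/SC fallback after this patch: the critical
 * section is protected by masking interrupts on the local CPU
 * instead of taking the old global atomic_lock spinlock.
 * Assumption (not stated in the patch): CPUs without LL/SC are
 * uniprocessor-only, so no other CPU can race on v->counter while
 * interrupts are off.  local_irq_save()/local_irq_restore() come
 * from the kernel's IRQ-flags machinery.
 */
typedef struct { volatile int counter; } atomic_t;

static __inline__ void atomic_add_fallback(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable local interrupts, remember state */
	v->counter += i;		/* ordinary RMW, now uninterruptible        */
	local_irq_restore(flags);	/* restore the previous interrupt state     */
}

Compared with the old code, this drops the cross-CPU exclusion that atomic_lock provided, which is only correct on the assumption above; in exchange it removes the dependency on the spinlock that was breaking some configurations.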
Diffstat (limited to 'include/asm-mips/atomic.h')
-rw-r--r--  include/asm-mips/atomic.h  43
1 file changed, 21 insertions(+), 22 deletions(-)
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 94a95872d727..654b97d3e13a 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -24,10 +24,9 @@
#define _ASM_ATOMIC_H
#include <asm/cpu-features.h>
+#include <asm/interrupt.h>
#include <asm/war.h>
-extern spinlock_t atomic_lock;
-
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
@@ -85,9 +84,9 @@ static __inline__ void atomic_add(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
v->counter += i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -127,9 +126,9 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
v->counter -= i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -173,11 +172,11 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result += i;
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -220,11 +219,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result -= i;
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -277,12 +276,12 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -433,9 +432,9 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
v->counter += i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -475,9 +474,9 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
v->counter -= i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -521,11 +520,11 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result += i;
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -568,11 +567,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result -= i;
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -625,12 +624,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
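The atomic_sub_if_positive() fallbacks follow the same local_irq_save() pattern, but only write the new value back when the subtraction does not go negative, while still returning the (possibly negative) result. An illustrative caller is sketched below; the name take_token and its use are hypothetical and not part of this patch.

/*
 * Hypothetical caller: consume one unit of a counted resource only if
 * the count stays non-negative.  atomic_sub_if_positive() returns the
 * value after the subtraction; a negative return means the store was
 * skipped and the resource was not taken.
 */
static __inline__ int take_token(atomic_t *tokens)
{
	return atomic_sub_if_positive(1, tokens) >= 0;
}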