author	Nick Piggin <npiggin@suse.de>	2007-10-18 03:06:52 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-18 14:37:29 -0700
commit	87371e4fa4901d84ce83356b909b83f31f40758f (patch)
tree	e80ec52481501392a8c6cf9b9d4ba937c48db0cb /include/asm-ia64
parent	alpha: lock bitops (diff)
ia64: lock bitops
Convert ia64 to new bitops.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--	include/asm-ia64/bitops.h	43
1 file changed, 41 insertions(+), 2 deletions(-)
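Usage note (editor's sketch, not part of the commit): the lock bitops introduced here are meant to be used as a pair, test_and_set_bit_lock() taking a bit with acquire semantics and clear_bit_unlock() releasing it, so a caller does not need extra smp_mb__*() barriers around a bit-based lock. A minimal caller sketch follows, assuming kernel context; my_bit_lock()/my_bit_unlock() are hypothetical helper names, not part of this patch.

#include <linux/bitops.h>	/* test_and_set_bit_lock(), clear_bit_unlock() */
#include <asm/processor.h>	/* cpu_relax() */

/* Hypothetical bit spinlock built on the new lock bitops. */
static inline void my_bit_lock(volatile unsigned long *word)
{
	while (test_and_set_bit_lock(0, word))	/* acquire: returns old value of bit 0 */
		cpu_relax();			/* bit already set: spin until released */
}

static inline void my_bit_unlock(volatile unsigned long *word)
{
	clear_bit_unlock(0, word);	/* release: prior stores visible to the next locker */
}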
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 569dd62fe192..2144f1a8ed6f 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -94,6 +94,38 @@ clear_bit (int nr, volatile void *addr)
 }
 
 /**
+ * clear_bit_unlock - Clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and may not be reordered. It does
+ * contain a memory barrier suitable for unlock type operations.
+ */
+static __inline__ void
+clear_bit_unlock (int nr, volatile void *addr)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_rel(m, old, new) != old);
+}
+
+/**
+ * __clear_bit_unlock - Non-atomically clear a bit with release
+ *
+ * This is like clear_bit_unlock, but the implementation may use a non-atomic
+ * store (this one uses an atomic, however).
+ */
+#define __clear_bit_unlock clear_bit_unlock
+
+/**
  * __clear_bit - Clears a bit in memory (non-atomic version)
  */
 static __inline__ void
@@ -170,6 +202,15 @@ test_and_set_bit (int nr, volatile void *addr)
 }
 
 /**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on ia64
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
@@ -371,8 +412,6 @@ hweight64 (unsigned long x)
 #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
 #define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
 
-#include <asm-generic/bitops/lock.h>
-
 #endif /* __KERNEL__ */
 
 #include <asm-generic/bitops/find.h>
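For readers outside the kernel tree: the clear_bit_unlock() loop added above is an ordinary compare-and-swap retry loop whose successful store carries release ordering (cmpxchg_rel on ia64). A rough user-space analogue of that pattern, written with the GCC/C11 __atomic builtins purely for illustration (not ia64 code and not part of this patch), could look like this:

#include <stdint.h>

/* Clear bit 'nr' in the bitmap at 'addr' with release ordering. */
static void clear_bit_release_sketch(int nr, uint32_t *addr)
{
	uint32_t *m = addr + (nr >> 5);		/* 32-bit word holding the bit */
	uint32_t mask = ~(1u << (nr & 31));	/* every bit set except 'nr' */
	uint32_t old = *m;

	/*
	 * Retry until the masked value is published with release semantics;
	 * on failure __atomic_compare_exchange_n refreshes 'old' for us.
	 */
	while (!__atomic_compare_exchange_n(m, &old, old & mask, 0,
					    __ATOMIC_RELEASE, __ATOMIC_RELAXED))
		;
}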