author     James Hogan <james.hogan@imgtec.com>  2012-10-09 11:00:24 +0100
committer  James Hogan <james.hogan@imgtec.com>  2013-03-02 20:09:50 +0000
commit     6006c0d8ce9441dd1363bf14f18a8e28d3588460 (patch)
tree       786183053c89e11b3058b8a16f7953744b819340 /arch/metag/include/asm/spinlock_lock1.h
parent     metag: Module support (diff)
metag: Atomics, locks and bitops
Add header files to implement Meta hardware thread locks (used by some
other atomic operations), atomics, spinlocks, and bitops.

There are two main types of atomic primitives for metag (in addition to
IRQs off on UP):
 - LOCK instructions provide locking between hardware threads.
 - LNKGET/LNKSET instructions provide load-linked/store-conditional
   operations allowing for lighter weight atomics on Meta2.

LOCK instructions allow for hardware threads to acquire voluntary or
exclusive hardware thread locks:
 - LOCK0 releases exclusive and voluntary lock from the running hardware
   thread.
 - LOCK1 acquires the voluntary hardware lock, blocking until it becomes
   available.
 - LOCK2 implies LOCK1, and additionally acquires the exclusive hardware
   lock, blocking all other hardware threads from executing.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
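To make the LOCK1 pattern in the diff below concrete, here is a minimal
sketch of the critical-section idiom the new spinlock code is built on.
__global_lock1()/__global_unlock1() are helpers added by this patch
(they take/release the voluntary hardware thread lock with interrupts
masked); the shared_counter variable and the wrapper function are
hypothetical, for illustration only.

	/* Sketch only: serialise a read-modify-write against other hw threads. */
	static unsigned int shared_counter;	/* hypothetical shared datum */

	static void shared_counter_inc(void)
	{
		unsigned long flags;

		__global_lock1(flags);		/* LOCK1 held, IRQs masked */
		fence();			/* as in the diff: order the write */
		shared_counter++;		/* no other hw thread can interleave */
		__global_unlock1(flags);	/* release LOCK1, restore IRQ mask */
	}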
Diffstat (limited to 'arch/metag/include/asm/spinlock_lock1.h')
-rw-r--r--  arch/metag/include/asm/spinlock_lock1.h  184
1 file changed, 184 insertions(+), 0 deletions(-)
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
new file mode 100644
index 000000000000..c630444cffe9
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_lock1.h
@@ -0,0 +1,184 @@
+#ifndef __ASM_SPINLOCK_LOCK1_H
+#define __ASM_SPINLOCK_LOCK1_H
+
+#include <asm/bug.h>
+#include <asm/global_lock.h>
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ int ret;
+
+ barrier();
+ ret = lock->lock;
+ WARN_ON(ret != 0 && ret != 1);
+ return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int we_won = 0;
+ unsigned long flags;
+
+again:
+ __global_lock1(flags);
+ if (lock->lock == 0) {
+ fence();
+ lock->lock = 1;
+ we_won = 1;
+ }
+ __global_unlock1(flags);
+ if (we_won == 0)
+ goto again;
+ WARN_ON(lock->lock != 1);
+}
+
+/* Returns 0 if failed to acquire lock */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ __global_lock1(flags);
+ ret = lock->lock;
+ if (ret == 0) {
+ fence();
+ lock->lock = 1;
+ }
+ __global_unlock1(flags);
+ return (ret == 0);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ barrier();
+ WARN_ON(!lock->lock);
+ lock->lock = 0;
+}
+
+/*
+ * RWLOCKS
+ *
+ * Write locks are easy - we just set bit 31 (the sign bit).  When
+ * unlocking, we can just write zero since the lock is exclusively
+ * held.
+ */
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int we_won = 0;
+
+again:
+ __global_lock1(flags);
+ if (rw->lock == 0) {
+ fence();
+ rw->lock = 0x80000000;
+ we_won = 1;
+ }
+ __global_unlock1(flags);
+ if (we_won == 0)
+ goto again;
+ WARN_ON(rw->lock != 0x80000000);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ __global_lock1(flags);
+ ret = rw->lock;
+ if (ret == 0) {
+ fence();
+ rw->lock = 0x80000000;
+ }
+ __global_unlock1(flags);
+
+ return (ret == 0);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ barrier();
+ WARN_ON(rw->lock != 0x80000000);
+ rw->lock = 0;
+}
+
+/* write_can_lock - would write_trylock() succeed? */
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
+{
+ unsigned int ret;
+
+ barrier();
+ ret = rw->lock;
+ return (ret == 0);
+}
+
+/*
+ * Read locks are a bit more hairy.  Everything here runs under the
+ * global voluntary lock (LOCK1), so the sequence is:
+ *  - Take the global lock and read the lock value.
+ *  - If the write bit (bit 31) is set, a writer holds the lock:
+ *    release the global lock and try again.
+ *  - Otherwise store the incremented reader count and release the
+ *    global lock.
+ * Unlocking is similarly hairy.  We may have multiple read locks
+ * currently active.  However, we know we won't have any write
+ * locks.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int we_won = 0, ret;
+
+again:
+ __global_lock1(flags);
+ ret = rw->lock;
+ if (ret < 0x80000000) {
+ fence();
+ rw->lock = ret + 1;
+ we_won = 1;
+ }
+ __global_unlock1(flags);
+ if (!we_won)
+ goto again;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ __global_lock1(flags);
+ fence();
+ ret = rw->lock--;
+ __global_unlock1(flags);
+ WARN_ON(ret == 0);
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned long flags;
+ unsigned int ret;
+
+ __global_lock1(flags);
+ ret = rw->lock;
+ if (ret < 0x80000000) {
+ fence();
+ rw->lock = ret + 1;
+ }
+ __global_unlock1(flags);
+ return (ret < 0x80000000);
+}
+
+/* read_can_lock - would read_trylock() succeed? */
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
+{
+ unsigned int ret;
+
+ barrier();
+ ret = rw->lock;
+ return (ret < 0x80000000);
+}
+
+#endif /* __ASM_SPINLOCK_LOCK1_H */
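For reference, all of the rwlock routines above share a single encoding
of rw->lock: bit 31 (0x80000000) is the writer bit and the low bits
count active readers.  A short sketch of the tests this implies; the
helper names are hypothetical, not part of the patch:

	/*
	 * rw->lock encoding (matching the checks above):
	 *   0x00000000  unlocked
	 *   0x00000002  two readers active
	 *   0x80000000  one writer (excludes readers and other writers)
	 */
	static int can_read_lock(unsigned int lockval)	/* hypothetical */
	{
		return lockval < 0x80000000;	/* writer bit clear */
	}

	static int can_write_lock(unsigned int lockval)	/* hypothetical */
	{
		return lockval == 0;		/* no readers, no writer */
	}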