/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h;
 *  an illustrative sketch of the two variants is at the end of this file)
 */

void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		__ASM_BARRIER
		"st %1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
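/*
 * A minimal sketch of the reader-in-interrupt pattern described above, as
 * seen through the generic rwlock API that typically reaches these arch_*
 * primitives.  The lock and the two functions below are hypothetical;
 * read_lock()/read_unlock() and write_lock_irqsave()/write_unlock_irqrestore()
 * are the real generic wrappers.  If the writer left interrupts enabled, an
 * interrupt reader on the same CPU could spin on a lock its own CPU already
 * holds, hence the irq-safe variant on the write side only.
 *
 *	static DEFINE_RWLOCK(sample_lock);
 *
 *	void sample_irq_reader(void)		(interrupt context)
 *	{
 *		read_lock(&sample_lock);
 *		...
 *		read_unlock(&sample_lock);
 *	}
 *
 *	void sample_writer(void)		(process context)
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&sample_lock, flags);
 *		...
 *		write_unlock_irqrestore(&sample_lock, flags);
 *	}
 */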
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string " %0,%2,%1\n"			\
		"bcr 14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		"bcr 14,0\n"				\
		op_string " %0,%2,%1\n"			\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		__ASM_BARRIER
		"st %1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */
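/*
 * A minimal sketch of the two spin lock variants mentioned in the comment
 * near the top of this file, expressed with the generic spinlock API that
 * typically reaches arch_spin_lock()/arch_spin_lock_flags().  The lock and
 * functions are hypothetical; spin_lock()/spin_unlock() and
 * spin_lock_irqsave()/spin_unlock_irqrestore() are the real generic wrappers.
 *
 *	static DEFINE_SPINLOCK(sample_lock);
 *
 *	void sample_noirq(void)			(IRQs left untouched)
 *	{
 *		spin_lock(&sample_lock);
 *		...
 *		spin_unlock(&sample_lock);
 *	}
 *
 *	void sample_irqsafe(void)		(IRQs disabled on the local CPU)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&sample_lock, flags);
 *		...
 *		spin_unlock_irqrestore(&sample_lock, flags);
 *	}
 */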