author		Linus Torvalds <torvalds@linux-foundation.org>	2017-11-13 12:38:26 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-13 12:38:26 -0800
commit		8e9a2dba8686187d8c8179e5b86640e653963889 (patch)
tree		a4ba543649219cbb28d91aab65b785d763f5d069 /arch/blackfin/include/asm/spinlock.h
parent		Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip (diff)
parent		locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - Another attempt at enabling cross-release lockdep dependency
     tracking (automatically part of CONFIG_PROVE_LOCKING=y), this time
     with better performance and fewer false positives (Byungchul Park)

   - Introduce lockdep_assert_irqs_enabled()/disabled() and convert
     open-coded equivalents to lockdep variants (Frederic Weisbecker)

   - Add down_read_killable() and use it in the VFS's iterate_dir()
     method (Kirill Tkhai)

   - Convert remaining uses of ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE().
     Most of the conversion was Coccinelle driven (Mark Rutland, Paul E.
     McKenney)

   - Get rid of lockless_dereference(), by strengthening Alpha atomics,
     strengthening READ_ONCE() with smp_read_barrier_depends() and thus
     being able to convert users of lockless_dereference() to
     READ_ONCE() (Will Deacon)

   - Various micro-optimizations:
      - better PV qspinlocks (Waiman Long)
      - better x86 barriers (Michael S. Tsirkin)
      - better x86 refcounts (Kees Cook)

   - ... plus other fixes and enhancements (Borislav Petkov, Juergen
     Gross, Miguel Bernal Marin)"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  locking/x86: Use LOCK ADD for smp_mb() instead of MFENCE
  rcu: Use lockdep to assert IRQs are disabled/enabled
  netpoll: Use lockdep to assert IRQs are disabled/enabled
  timers/posix-cpu-timers: Use lockdep to assert IRQs are disabled/enabled
  sched/clock, sched/cputime: Use lockdep to assert IRQs are disabled/enabled
  irq_work: Use lockdep to assert IRQs are disabled/enabled
  irq/timings: Use lockdep to assert IRQs are disabled/enabled
  perf/core: Use lockdep to assert IRQs are disabled/enabled
  x86: Use lockdep to assert IRQs are disabled/enabled
  smp/core: Use lockdep to assert IRQs are disabled/enabled
  timers/hrtimer: Use lockdep to assert IRQs are disabled/enabled
  timers/nohz: Use lockdep to assert IRQs are disabled/enabled
  workqueue: Use lockdep to assert IRQs are disabled/enabled
  irq/softirqs: Use lockdep to assert IRQs are disabled/enabled
  locking/lockdep: Add IRQs disabled/enabled assertion APIs: lockdep_assert_irqs_enabled()/disabled()
  locking/pvqspinlock: Implement hybrid PV queued/unfair locks
  locking/rwlocks: Fix comments
  x86/paravirt: Set up the virt_spin_lock_key after static keys get initialized
  block, locking/lockdep: Assign a lock_class per gendisk used for wait_for_completion()
  workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes
  ...
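More than a dozen of the commits above convert open-coded IRQ-state checks to the new lockdep assertion APIs. A minimal sketch of that pattern in kernel C (foo_work_fn() is a hypothetical caller, not a function from this series):

	#include <linux/bug.h>
	#include <linux/irqflags.h>
	#include <linux/lockdep.h>

	/* Before: an unconditional runtime check, paid on every kernel. */
	static void foo_work_fn_old(void)
	{
		WARN_ON_ONCE(!irqs_disabled());
		/* ... work that must run with IRQs off ... */
	}

	/* After: checked only under CONFIG_PROVE_LOCKING=y, and compiled
	 * out entirely otherwise. */
	static void foo_work_fn(void)
	{
		lockdep_assert_irqs_disabled();
		/* ... work that must run with IRQs off ... */
	}

The ACCESS_ONCE() conversions are equally mechanical, which is why Coccinelle could drive them; sketched as a diff (p->counter is a made-up field):

	-	val = ACCESS_ONCE(p->counter);
	+	val = READ_ONCE(p->counter);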
Diffstat (limited to 'arch/blackfin/include/asm/spinlock.h')
-rw-r--r--	arch/blackfin/include/asm/spinlock.h | 20 --------------------
1 file changed, 0 insertions(+), 20 deletions(-)
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index f6431439d15d..839d1441af3a 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -36,8 +36,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__raw_spin_lock_asm(&lock->lock);
 }
 
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __raw_spin_trylock_asm(&lock->lock);
@@ -48,23 +46,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-	return __raw_uncached_fetch_asm(&rw->lock) > 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
-}
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	__raw_read_lock_asm(&rw->lock);
 }
 
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	return __raw_read_trylock_asm(&rw->lock);
@@ -80,8 +66,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__raw_write_lock_asm(&rw->lock);
 }
 
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	return __raw_write_trylock_asm(&rw->lock);
@@ -92,10 +76,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	__raw_write_unlock_asm(&rw->lock);
 }
 
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
 #endif
 
 #endif /* !__BFIN_SPINLOCK_H */
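For context, the deletions above are possible because after this series the core locking code supplies defaults wherever an architecture does not override them. A sketch of that fallback pattern, assuming it sits in include/linux/spinlock.h as the per-arch cleanups in this series suggest:

	/* Generic fallbacks (sketch): picked up whenever the arch header
	 * leaves these undefined, making per-arch copies like Blackfin's
	 * redundant. */
	#ifndef arch_spin_lock_flags
	# define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
	#endif

	#ifndef arch_read_lock_flags
	# define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
	#endif

	#ifndef arch_spin_relax
	# define arch_spin_relax(lock)	cpu_relax()
	#endif

arch_write_lock_flags() and the read/write relax hooks follow the same shape. The arch_{read,write}_can_lock() helpers, by contrast, were removed tree-wide with no replacement: asking whether a lock could be taken without actually taking it is inherently racy.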