author	Arnd Bergmann <arnd@arndb.de>	2021-10-22 13:59:38 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2021-10-30 16:37:28 +0200
commit	f98a3dccfcb0b9b9c3bef8df9edd61cda80ad937 (patch)
tree	a4cd719ee2fe9681435bdf9150ca96d1d8b834a8 /arch
parent	locking/rwsem: Fix comments about reader optimistic lock stealing conditions (diff)
locking: Remove spin_lock_flags() etc
parisc, ia64 and powerpc32 are the only remaining architectures that
provide custom arch_{spin,read,write}_lock_flags() functions, which are
meant to re-enable interrupts while waiting for a spinlock.

However, none of these can actually run into this codepath, because
it is only called on architectures without CONFIG_GENERIC_LOCKBREAK,
or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none
of those combinations are possible on the three architectures.

Going back in the git history, it appears that arch/mn10300 may have
been able to run into this code path, but there is a good chance that
it never worked. On the architectures that still exist, it was already
impossible to hit back in 2008 after the introduction of
CONFIG_GENERIC_LOCKBREAK, and possibly earlier.

As this is all dead code, just remove it and the helper functions built
around it. For arch/ia64, the inline asm could be cleaned up, but
it seems safer to leave it untouched.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Helge Deller <deller@gmx.de>  # parisc
Link: https://lore.kernel.org/r/20211022120058.1031690-1-arnd@kernel.org
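[Editor's note] For readers who have not seen this interface before, here is a
minimal, self-contained sketch of the pattern being removed. It is userspace C,
not kernel code: the sketch_* and fake_irq_*() names and the IRQ_ENABLED bit
are hypothetical stand-ins for arch_spin_lock(), the kernel's local_irq_*()
primitives and an architecture flag bit such as parisc's PSW_SM_I. The _flags
variant briefly restores the caller's interrupt state while spinning on a
contended lock, which is exactly the behaviour that turned out to be
unreachable:

/* Build with: cc -std=c11 -c sketch.c */
#include <stdatomic.h>
#include <stdbool.h>

#define IRQ_ENABLED 1UL	/* stand-in: "interrupts were on" flag bit */

static void fake_irq_enable(void)  { /* stand-in for local_irq_enable()  */ }
static void fake_irq_disable(void) { /* stand-in for local_irq_disable() */ }

typedef struct { atomic_flag locked; } sketch_spinlock_t;

static bool sketch_trylock(sketch_spinlock_t *lock)
{
	/* test_and_set returns the old value: false means we got the lock */
	return !atomic_flag_test_and_set_explicit(&lock->locked,
						  memory_order_acquire);
}

/* Plain lock: spin with the interrupt state left untouched. */
static void sketch_lock(sketch_spinlock_t *lock)
{
	while (!sketch_trylock(lock))
		;
}

/*
 * The _flags variant: if the saved flags say interrupts were enabled
 * before the caller disabled them, re-enable them for a moment on each
 * failed attempt so the CPU can still take interrupts while waiting.
 */
static void sketch_lock_flags(sketch_spinlock_t *lock, unsigned long flags)
{
	while (!sketch_trylock(lock)) {
		if (flags & IRQ_ENABLED) {
			fake_irq_enable();
			fake_irq_disable();
		}
	}
}

static void sketch_unlock(sketch_spinlock_t *lock)
{
	atomic_flag_clear_explicit(&lock->locked, memory_order_release);
}

Since the _flags entry points are unreachable in practice, sketch_lock_flags()
degenerates to sketch_lock() plus dead flag handling, which is why the patch
simply deletes the kernel equivalents and keeps the plain lock paths.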
Diffstat (limited to 'arch')
-rw-r--r--	arch/ia64/include/asm/spinlock.h	| 23
-rw-r--r--	arch/openrisc/include/asm/spinlock.h	| 3
-rw-r--r--	arch/parisc/include/asm/spinlock.h	| 15
-rw-r--r--	arch/powerpc/include/asm/simple_spinlock.h	| 21
-rw-r--r--	arch/s390/include/asm/spinlock.h	| 8
5 files changed, 6 insertions(+), 64 deletions(-)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 864775970c50..0e5c1ad3239c 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -124,18 +124,13 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
__ticket_spin_unlock(lock);
}
-static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
- unsigned long flags)
-{
- arch_spin_lock(lock);
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
#ifdef ASM_SUPPORTED
static __always_inline void
-arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock(arch_rwlock_t *lock)
{
+ unsigned long flags = 0;
+
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
"br.few 3f\n"
@@ -157,13 +152,8 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
: "p6", "p7", "r2", "memory");
}
-#define arch_read_lock_flags arch_read_lock_flags
-#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
-
#else /* !ASM_SUPPORTED */
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-
#define arch_read_lock(rw) \
do { \
arch_rwlock_t *__read_lock_ptr = (rw); \
@@ -186,8 +176,10 @@ do { \
#ifdef ASM_SUPPORTED
static __always_inline void
-arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock(arch_rwlock_t *lock)
{
+ unsigned long flags = 0;
+
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
"mov ar.ccv = r0\n"
@@ -210,9 +202,6 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
-#define arch_write_lock_flags arch_write_lock_flags
-#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-
#define arch_write_trylock(rw) \
({ \
register long result; \
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
index a8940bdfcb7e..264944a71535 100644
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -19,9 +19,6 @@
#include <asm/qrwlock.h>
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fa5ee8a45dbd..a6e5d66a7656 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -23,21 +23,6 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
continue;
}
-static inline void arch_spin_lock_flags(arch_spinlock_t *x,
- unsigned long flags)
-{
- volatile unsigned int *a;
-
- a = __ldcw_align(x);
- while (__ldcw(a) == 0)
- while (*a == 0)
- if (flags & PSW_SM_I) {
- local_irq_enable();
- local_irq_disable();
- }
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 8985791a2ba5..7ae6aeef8464 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -123,27 +123,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
}
}
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
- unsigned long flags_dis;
-
- while (1) {
- if (likely(__arch_spin_trylock(lock) == 0))
- break;
- local_save_flags(flags_dis);
- local_irq_restore(flags);
- do {
- HMT_low();
- if (is_shared_processor())
- splpar_spin_yield(lock);
- } while (unlikely(lock->slock != 0));
- HMT_medium();
- local_irq_restore(flags_dis);
- }
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__("# arch_spin_unlock\n\t"
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index ef59588a3042..888a2f1c9ee3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -67,14 +67,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lp)
arch_spin_lock_wait(lp);
}
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
- unsigned long flags)
-{
- if (!arch_spin_trylock_once(lp))
- arch_spin_lock_wait(lp);
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
if (!arch_spin_trylock_once(lp))