From d1be6a28b13ce6d1bc42bf9b6a9454c65839225b Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Fri, 22 Feb 2019 12:48:44 +0000
Subject: asm-generic/mmiowb: Add generic implementation of mmiowb() tracking

In preparation for removing all explicit mmiowb() calls from driver
code, implement a tracking system in asm-generic based loosely on the
PowerPC implementation. This allows architectures with a non-empty
mmiowb() definition to have the barrier automatically inserted in
spin_unlock() following a critical section containing an I/O write.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 kernel/Kconfig.locks      | 7 +++++++
 kernel/locking/spinlock.c | 7 +++++++
 2 files changed, 14 insertions(+)
(limited to 'kernel')

diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index fbba478ae522..6ba2570eddad 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -251,3 +251,10 @@ config ARCH_USE_QUEUED_RWLOCKS
 config QUEUED_RWLOCKS
 	def_bool y if ARCH_USE_QUEUED_RWLOCKS
 	depends on SMP
+
+config ARCH_HAS_MMIOWB
+	bool
+
+config MMIOWB
+	def_bool y if ARCH_HAS_MMIOWB
+	depends on SMP
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 936f3d14dd6b..0ff08380f531 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -22,6 +22,13 @@
 #include <linux/debug_locks.h>
 #include <linux/export.h>
 
+#ifdef CONFIG_MMIOWB
+#ifndef arch_mmiowb_state
+DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
+#endif
+#endif
+
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
--
cgit v1.2.3-59-g8ed1b
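The helpers that consume this per-CPU state, mmiowb_set_pending(),
mmiowb_spin_lock() and mmiowb_spin_unlock(), live in
include/asm-generic/mmiowb.h and mmiowb_types.h, which the
"(limited to 'kernel')" view above filters out. The following is a
sketch of the tracking logic they implement, reconstructed from the
patch description rather than quoted from those headers, so treat the
details as illustrative:

/* Sketch only: approximates include/asm-generic/mmiowb{_types,}.h. */
#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct mmiowb_state {
	u16	nesting_count;	/* depth of spinlock nesting on this CPU */
	u16	mmiowb_pending;	/* nesting depth at the last MMIO write */
};

#ifndef arch_mmiowb_state
DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
#define __mmiowb_state()	this_cpu_ptr(&__mmiowb_state)
#endif

static inline void mmiowb_set_pending(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	/* Called by the I/O write accessors: record that an MMIO write
	 * happened. A write outside any critical section records zero,
	 * so no barrier is ever issued on its behalf. */
	ms->mmiowb_pending = ms->nesting_count;
}

static inline void mmiowb_spin_lock(void)
{
	/* Taking a spinlock just bumps the nesting depth. */
	__mmiowb_state()->nesting_count++;
}

static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	/* Pay for the barrier only if an MMIO write occurred while a
	 * lock was held; I/O-free critical sections skip it entirely. */
	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();	/* architecture-provided barrier */
	}

	if (likely(ms->nesting_count))
		ms->nesting_count--;
}

With CONFIG_MMIOWB disabled, all three helpers compile away to no-ops,
which is why the hooks added by the next patch cost nothing on
architectures that do not select ARCH_HAS_MMIOWB.
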
From 60ca1e5a200cd294a12907fa36dece4241db4ab8 Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Fri, 22 Feb 2019 12:59:59 +0000
Subject: mmiowb: Hook up mmiowb helpers to spinlocks and generic I/O accessors

Removing explicit calls to mmiowb() from driver code means that we must
now call into the generic mmiowb_spin_{lock,unlock}() functions from the
core spinlock code. In order to elide barriers following critical
sections without any I/O writes, we also hook into the asm-generic I/O
routines.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 include/asm-generic/io.h        |  3 ++-
 include/linux/spinlock.h        | 11 ++++++++++-
 kernel/locking/spinlock_debug.c |  6 +++++-
 3 files changed, 17 insertions(+), 3 deletions(-)
(limited to 'kernel')

diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 303871651f8a..bc490a746602 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -19,6 +19,7 @@
 #include <asm-generic/iomap.h>
 #endif
 
+#include <asm/mmiowb.h>
 #include <asm-generic/pci_iomap.h>
 
 #ifndef mmiowb
@@ -49,7 +50,7 @@
 
 /* serialize device access against a spin_unlock, usually handled there. */
 #ifndef __io_aw
-#define __io_aw()	barrier()
+#define __io_aw()	mmiowb_set_pending()
 #endif
 
 #ifndef __io_pbw
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e089157dcf97..ed7c4d6b8235 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -57,6 +57,7 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <asm/barrier.h>
+#include <asm/mmiowb.h>
 
 
 /*
@@ -178,6 +179,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
 	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 }
 
 #ifndef arch_spin_lock_flags
@@ -189,15 +191,22 @@ do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
 {
 	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
+	mmiowb_spin_lock();
 }
 
 static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
-	return arch_spin_trylock(&(lock)->raw_lock);
+	int ret = arch_spin_trylock(&(lock)->raw_lock);
+
+	if (ret)
+		mmiowb_spin_lock();
+
+	return ret;
 }
 
 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
+	mmiowb_spin_unlock();
 	arch_spin_unlock(&lock->raw_lock);
 	__release(lock);
 }
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 9aa0fccd5d43..399669f7eba8 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -111,6 +111,7 @@ void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
 	arch_spin_lock(&lock->raw_lock);
+	mmiowb_spin_lock();
 	debug_spin_lock_after(lock);
 }
 
@@ -118,8 +119,10 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int ret = arch_spin_trylock(&lock->raw_lock);
 
-	if (ret)
+	if (ret) {
+		mmiowb_spin_lock();
 		debug_spin_lock_after(lock);
+	}
 #ifndef CONFIG_SMP
 	/*
 	 * Must not happen on UP:
@@ -131,6 +134,7 @@ int do_raw_spin_trylock(raw_spinlock_t *lock)
 
 void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
+	mmiowb_spin_unlock();
 	debug_spin_unlock(lock);
 	arch_spin_unlock(&lock->raw_lock);
 }
--
cgit v1.2.3-59-g8ed1b
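Taken together, the two patches make the barrier implicit from a
driver's point of view. A hypothetical before/after fragment follows;
the dev structure, its lock and the REG_CTRL register offset are made
up for illustration, while writel(), mmiowb(), spin_lock() and
spin_unlock() are the real kernel APIs:

/* Before this series: drivers on affected architectures had to order
 * MMIO writes against the lock handover by hand. */
spin_lock(&dev->lock);
writel(val, dev->regs + REG_CTRL);
mmiowb();			/* explicit barrier before the unlock */
spin_unlock(&dev->lock);

/* After this series: writel() marks the write as pending via
 * __io_aw(), and spin_unlock() issues mmiowb() automatically. */
spin_lock(&dev->lock);
writel(val, dev->regs + REG_CTRL);
spin_unlock(&dev->lock);	/* barrier, if needed, happens in here */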