author    Matt Turner <mattst88@gmail.com>    2015-08-04 14:35:05 -0700
committer Matt Turner <mattst88@gmail.com>    2015-08-30 18:01:16 -0700
commit    8f8dcb3f7fe4febbfa96e64d4ad47de958c5cc34 (patch)
tree      58f9dca31e0fe7d0149ea90b9942594db6c38fae /arch/alpha
parent    Linux 4.2 (diff)
download  linux-dev-8f8dcb3f7fe4febbfa96e64d4ad47de958c5cc34.tar.xz
          linux-dev-8f8dcb3f7fe4febbfa96e64d4ad47de958c5cc34.zip
alpha: select CONFIG_ARCH_USE_CMPXCHG_LOCKREF.
On Alpha we have spinlocks that are 32b in size and an efficient cmpxchg64
implementation, so we qualify to make use of cmpxchg backed lockrefs.

Select the ARCH_USE_CMPXCHG_LOCKREF Kconfig symbol and provide a trivial
implementation of arch_spin_value_unlocked to satisfy the lockref code.

Using Linus' simple testcase from
http://article.gmane.org/gmane.linux.file-systems/77466 on a dual CPU ES47
system I see around an 8% gain:

    N           Min           Max        Median           Avg        Stddev
x  30       6194580       6295654       6272504       6272514     17694.232
+  30       6731164       6786334       6767982       6764274     13738.863
Difference at 95.0% confidence
        491760 +/- 8188.17
        7.83992% +/- 0.130541%
        (Student's t, pooled s = 15840.5)

Signed-off-by: Matt Turner <mattst88@gmail.com>
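For readers unfamiliar with cmpxchg-backed lockrefs: the lock word and the
reference count share a single 64-bit value, and when the lock half of a
snapshot reads as unlocked the count is bumped with one 64-bit cmpxchg
instead of taking the spinlock at all. The real code lives in lib/lockref.c;
the user-space sketch below only illustrates the idea, and its struct
layout, function names, and fallback handling are simplified stand-ins
rather than copies of the kernel source.

/*
 * Illustrative user-space analogue of a cmpxchg-backed lockref fast
 * path.  Layout and names are simplified; the real kernel code is in
 * lib/lockref.c and include/linux/lockref.h.
 */
#include <stdint.h>
#include <stdio.h>

struct lockref {
	union {
		uint64_t lock_count;		/* one word, so a 64-bit cmpxchg covers both halves */
		struct {
			uint32_t lock;		/* 0 == unlocked, as in Alpha's arch_spinlock_t */
			uint32_t count;		/* the reference count */
		};
	};
};

/* Same test the patch adds for Alpha: an unlocked lock word reads as zero. */
static int spin_value_unlocked(uint32_t lock)
{
	return lock == 0;
}

/*
 * Lockless increment: retry the 64-bit compare-and-swap as long as the
 * lock half of our snapshot looks free.  Returns 0 if the lock was held,
 * in which case the kernel would fall back to taking the spinlock.
 */
static int lockref_get_fast(struct lockref *lr)
{
	struct lockref old, new;

	old.lock_count = __atomic_load_n(&lr->lock_count, __ATOMIC_RELAXED);
	while (spin_value_unlocked(old.lock)) {
		new = old;
		new.count++;
		if (__atomic_compare_exchange_n(&lr->lock_count, &old.lock_count,
						new.lock_count, 1,
						__ATOMIC_RELAXED, __ATOMIC_RELAXED))
			return 1;	/* count bumped without touching the lock */
	}
	return 0;
}

int main(void)
{
	struct lockref lr = { .lock_count = 0 };

	lockref_get_fast(&lr);
	lockref_get_fast(&lr);
	printf("count = %u, lock = %u\n",
	       (unsigned)lr.count, (unsigned)lr.lock);	/* count = 2, lock = 0 */
	return 0;
}

The 64-bit width is the whole point: checking the lock and updating the
count happen as one atomic operation, which is why the commit needs both a
32-bit spinlock and a usable cmpxchg64.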
Diffstat (limited to 'arch/alpha')
-rw-r--r--  arch/alpha/Kconfig                   1
-rw-r--r--  arch/alpha/include/asm/spinlock.h    5
2 files changed, 6 insertions, 0 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index bf9e9d3b3792..f515a4dbf7a0 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -3,6 +3,7 @@ config ALPHA
 	default y
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_AOUT
 	select HAVE_IDE
 	select HAVE_OPROFILE
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index 37b570d01202..fed9c6f44c19 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -16,6 +16,11 @@
 #define arch_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.lock == 0;
+}
+
 static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
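One detail worth noting about the helper added above: it takes the
arch_spinlock_t by value, because the lockref code applies it to a copy of
the lock word it has already read as part of its 64-bit snapshot, never to
the live lock. A minimal, hypothetical caller is sketched below; it is not
kernel source, and the arch_spinlock_t layout shown is simply the single
unsigned int field that the lock.lock == 0 test above implies.

/* Hypothetical illustration of why the helper takes its argument by value. */
typedef struct { volatile unsigned int lock; } arch_spinlock_t;	/* 0 == unlocked */

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	/* Operates on the caller's snapshot; never dereferences the live lock. */
	return lock.lock == 0;
}

static int may_try_cmpxchg_path(arch_spinlock_t snapshot)
{
	/* The lockref fast path only attempts its 64-bit cmpxchg when the
	 * lock half of the snapshot it already holds reads as unlocked. */
	return arch_spin_value_unlocked(snapshot);
}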