author		Philipp Hachtmann <phacht@linux.vnet.ibm.com>	2014-04-07 18:25:23 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2014-05-20 08:58:42 +0200
commit		6c8cd5bbda7e6be166cf2e2dd4be5890193e17ac
tree		0245d1a206b04c2cd2b5b4914dfb696205673861 /arch/s390/include
parent		s390/spinlock: cleanup spinlock code
s390/spinlock: optimize spinlock code sequence
Use lowcore constant to improve the code generated for spinlocks.

[ Martin Schwidefsky: patch breakdown and code beautification ]

Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
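The idea in one place: 0 means "unlocked" and ~cpu identifies the owner (~cpu is nonzero even for cpu 0), so taking the lock is a single compare-and-swap against a value precomputed once per cpu, instead of recomputing ~smp_processor_id() on every attempt. Below is a minimal user-space model of the scheme, for illustration only: cas() and the __thread variable stand in for _raw_compare_and_swap and the lowcore field; this is a sketch, not the kernel code.

#include <stdio.h>

typedef struct { unsigned int lock; } arch_spinlock_t;

/* Stand-in for the per-cpu lowcore slot this patch adds. */
static __thread unsigned int spinlock_lockval;

static unsigned int arch_spin_lockval(int cpu)
{
	return ~cpu;	/* never 0, so 0 can mean "free" */
}

/* Stand-in for _raw_compare_and_swap, via a GCC builtin. */
static int cas(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

int main(void)
{
	arch_spinlock_t lp = { 0 };

	spinlock_lockval = arch_spin_lockval(0);	/* set once per cpu */

	if (cas(&lp.lock, 0, spinlock_lockval))		/* trylock_once */
		printf("locked, lock=%#x\n", lp.lock);
	if (cas(&lp.lock, spinlock_lockval, 0))		/* tryrelease_once */
		printf("released, lock=%#x\n", lp.lock);
	return 0;
}

A side effect of the encoding: the release CAS only succeeds if the lock still holds this cpu's lockval, so only the owner can unlock.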
Diffstat (limited to 'arch/s390/include')
-rw-r--r--	arch/s390/include/asm/lowcore.h		|  5 +++--
-rw-r--r--	arch/s390/include/asm/spinlock.h	| 15 +++++++++------
2 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index bbf8141408cd..3b476eb92f20 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -139,7 +139,7 @@ struct _lowcore {
 	__u32	percpu_offset;			/* 0x02f0 */
 	__u32	machine_flags;			/* 0x02f4 */
 	__u32	ftrace_func;			/* 0x02f8 */
-	__u8	pad_0x02fc[0x0300-0x02fc];	/* 0x02fc */
+	__u32	spinlock_lockval;		/* 0x02fc */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
@@ -285,7 +285,8 @@ struct _lowcore {
 	__u64	machine_flags;			/* 0x0388 */
 	__u64	ftrace_func;			/* 0x0390 */
 	__u64	gmap;				/* 0x0398 */
-	__u8	pad_0x03a0[0x0400-0x03a0];	/* 0x03a0 */
+	__u32	spinlock_lockval;		/* 0x03a0 */
+	__u8	pad_0x03a4[0x0400-0x03a4];	/* 0x03a4 */
 
 	/* Interrupt response block. */
 	__u8	irb[64];			/* 0x0400 */
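Not shown above (the diffstat is limited to arch/s390/include): the new field must be filled in once per cpu during bring-up, before the first lock is taken. A plausible one-liner for that setup, assuming the usual S390_lowcore accessor; the placement is hypothetical:

	S390_lowcore.spinlock_lockval = arch_spin_lockval(smp_processor_id());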
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index b60212a02d08..5a0b2882ad48 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -11,6 +11,8 @@
 #include <linux/smp.h>
 
+#define SPINLOCK_LOCKVAL	(S390_lowcore.spinlock_lockval)
+
 extern int spin_retry;
 
 static inline int
@@ -40,6 +42,11 @@ int arch_spin_trylock_retry(arch_spinlock_t *);
 void arch_spin_relax(arch_spinlock_t *);
 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
+static inline u32 arch_spin_lockval(int cpu)
+{
+	return ~cpu;
+}
+
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
 	return lock.lock == 0;
@@ -52,16 +59,12 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 
 static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
-	unsigned int new = ~smp_processor_id();
-
-	return _raw_compare_and_swap(&lp->lock, 0, new);
+	return _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL);
 }
 
 static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
 {
-	unsigned int old = ~smp_processor_id();
-
-	return _raw_compare_and_swap(&lp->lock, old, 0);
+	return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
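The hunk ends at the arch_spin_lock definition. For orientation, a sketch of how a fast path can compose the helpers above; arch_spin_lock_wait is assumed to be this file's slow-path helper, and the function is illustrative, not quoted from the tree:

	static inline void arch_spin_lock_sketch(arch_spinlock_t *lp)
	{
		if (!arch_spin_trylock_once(lp))	/* uncontended: one CAS */
			arch_spin_lock_wait(lp);	/* contended: slow path */
	}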