author    John David Anglin <dave.anglin@bell.net>  2020-10-02 21:21:41 +0200
committer Helge Deller <deller@gmx.de>  2020-10-15 08:10:39 +0200
commit    53a42b6324b8ddce1e9d2f34da2ca68ef21c2084 (patch)
tree      e56e6b487f238312f4c01b526a65aad48de475aa /arch/parisc/kernel/syscall.S
parent    parisc: Mark pointers volatile in __xchg8(), __xchg32() and __xchg64() (diff)
parisc: Switch to more fine grained lws locks
Increase the number of lws locks to 256 entries (instead of 16) and choose the lock entry based on bits 3-11 (instead of 4-7) of the relevant address. With this change we achieve more fine-grained locking in futex syscalls and thus reduce the number of possible stalls.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
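To make the hashing change concrete, here is a minimal C model of the old and new lock-slot computations. This is a sketch, not kernel code: the names old_lws_hash and new_lws_hash are hypothetical, and 32-bit user addresses are assumed.

	#include <stdint.h>
	#include <stdio.h>

	/* Old scheme: extru %r26, 27, 4 takes address bits 4-7,
	 * selecting one of 16 lws lock slots. */
	static unsigned old_lws_hash(uint32_t addr)
	{
		return (addr >> 4) & 0xf;
	}

	/* New scheme: extru %r26, 28, 8 takes 8 address bits
	 * starting at bit 3, selecting one of 256 slots. */
	static unsigned new_lws_hash(uint32_t addr)
	{
		return (addr >> 3) & 0xff;
	}

	int main(void)
	{
		/* Hypothetical futex addresses whose bits 4-7 match but
		 * whose higher bits differ: they contend for one lock
		 * under the old hash, but not under the new one. */
		uint32_t a = 0x10010, b = 0x10110;
		printf("old: %u vs %u\n", old_lws_hash(a), old_lws_hash(b));
		printf("new: %u vs %u\n", new_lws_hash(a), new_lws_hash(b));
		return 0;
	}

Each lock entry stays 16 bytes wide, so the assembly then multiplies this slot index by 16 to address into lws_lock_start; the model above computes only the index.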
Diffstat (limited to 'arch/parisc/kernel/syscall.S')
 arch/parisc/kernel/syscall.S | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 3ad61a177f5b..322503780db6 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -571,8 +571,8 @@ lws_compare_and_swap:
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
- /* Extract four bits from r26 and hash lock (Bits 4-7) */
- extru %r26, 27, 4, %r20
+ /* Extract eight bits from r26 and hash lock (Bits 3-11) */
+ extru %r26, 28, 8, %r20
/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
@@ -761,8 +761,8 @@ cas2_lock_start:
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
- /* Extract four bits from r26 and hash lock (Bits 4-7) */
- extru %r26, 27, 4, %r20
+ /* Extract eight bits from r26 and hash lock (Bits 3-11) */
+ extru %r26, 28, 8, %r20
/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
@@ -950,7 +950,7 @@ END(sys_call_table64)
.align L1_CACHE_BYTES
ENTRY(lws_lock_start)
/* lws locks */
- .rept 16
+ .rept 256
/* Keep locks aligned at 16-bytes */
.word 1
.word 0
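For reference, PA-RISC extru r,p,len,t extracts an unsigned field of len bits from r ending at big-endian bit position p (bit 0 is the MSB) and right-justifies it into t. A small C model, assuming 32-bit operands and len < 32 (extru_model is a hypothetical name):

	#include <stdint.h>

	/* Model of extru r,p,len: len bits ending at big-endian
	 * position p, right-justified.  extru %r26, 28, 8 therefore
	 * computes (r26 >> 3) & 0xff, i.e. address bits 3-10; the
	 * old extru %r26, 27, 4 computed (r26 >> 4) & 0xf. */
	static uint32_t extru_model(uint32_t r, unsigned p, unsigned len)
	{
		return (r >> (31 - p)) & ((1u << len) - 1);
	}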