author     pefo <pefo@openbsd.org>    2004-08-11 17:38:15 +0000
committer  pefo <pefo@openbsd.org>    2004-08-11 17:38:15 +0000
commit     1b5c0ae122494e594a4e217b37043a65d1fb6ada (patch)
tree       6a41bda4356a9bb6f3228e74c4eaa1b99c840a03 /lib/libpthread/arch/mips64
parent     bye (diff)
mips->mips64
Diffstat (limited to 'lib/libpthread/arch/mips64')
-rw-r--r--  lib/libpthread/arch/mips64/_atomic_lock.c         |  55
-rw-r--r--  lib/libpthread/arch/mips64/_spinlock.h            |   6
-rw-r--r--  lib/libpthread/arch/mips64/uthread_machdep.c      |  56
-rw-r--r--  lib/libpthread/arch/mips64/uthread_machdep.h      |   6
-rw-r--r--  lib/libpthread/arch/mips64/uthread_machdep_asm.S  |  52
5 files changed, 175 insertions(+), 0 deletions(-)
diff --git a/lib/libpthread/arch/mips64/_atomic_lock.c b/lib/libpthread/arch/mips64/_atomic_lock.c
new file mode 100644
index 00000000000..5390c2c3e13
--- /dev/null
+++ b/lib/libpthread/arch/mips64/_atomic_lock.c
@@ -0,0 +1,55 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2004/08/11 17:41:34 pefo Exp $ */
+/*
+ * Atomic lock for mips
+ */
+
+#include "pthread.h"
+#include "pthread_private.h"
+#include "spinlock.h"
+#include <signal.h>
+
+/*
+ * uthread atomic lock:
+ * attempt to acquire a lock (by giving it a non-zero value).
+ * Return zero on success, or the lock's value on failure
+ */
+int
+_atomic_lock(volatile _spinlock_lock_t *lock)
+{
+#if __mips >= 2
+ _spinlock_lock_t old;
+ _spinlock_lock_t temp;
+
+ do {
+ /*
+ * On a mips2 machine and above, we can use ll/sc.
+ * Read the lock and tag the cache line with a 'load linked'
+ * instruction. (Register 17 (LLAddr) will hold the
+ * physical address of lock for diagnostic purposes);
+ * (Under pathologically heavy swapping, the physaddr may
+ * change! XXX)
+ */
+ __asm__("ll %0, %1" : "=r"(old) : "m"(*lock));
+ if (old != _SPINLOCK_UNLOCKED)
+ break; /* already locked */
+ /*
+ * Try and store a 1 at the tagged lock address. If
+ * anyone else has since written it, the tag on the cache
+ * line will have been wiped, and temp will be set to zero
+ * by the 'store conditional' instruction.
+ */
+ temp = _SPINLOCK_LOCKED;
+ __asm__("sc %0, %1" : "=r"(temp), "=m"(*lock)
+ : "0"(temp));
+ } while (temp == 0);
+
+ return (old != _SPINLOCK_UNLOCKED);
+#else
+ /*
+ * Older MIPS cpus have no way of doing an atomic lock
+ * without some kind of shift to supervisor mode.
+ */
+
+ return (_thread_slow_atomic_lock(lock));
+#endif
+}
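The ll/sc loop above is essentially a hand-written compare-and-swap: it is roughly what a compiler emits for one on MIPS II and later. As a standalone illustration of the same contract (zero on success, non-zero if the lock was already held), here is a minimal sketch using C11 atomics; it is not part of this commit and the function name is hypothetical:

#include <stdatomic.h>

/*
 * Sketch of _atomic_lock()'s contract with a C11 compare-and-swap:
 * returns 0 if the lock was acquired, non-zero if it was already held.
 */
static int
atomic_lock_sketch(volatile atomic_int *lock)
{
	int unlocked = 0;

	return (!atomic_compare_exchange_strong_explicit(lock, &unlocked, 1,
	    memory_order_acquire, memory_order_relaxed));
}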
diff --git a/lib/libpthread/arch/mips64/_spinlock.h b/lib/libpthread/arch/mips64/_spinlock.h
new file mode 100644
index 00000000000..d1f543f2faa
--- /dev/null
+++ b/lib/libpthread/arch/mips64/_spinlock.h
@@ -0,0 +1,6 @@
+/* $OpenBSD: _spinlock.h,v 1.1 2004/08/11 17:41:34 pefo Exp $ */
+
+#define _SPINLOCK_UNLOCKED (0)
+#define _SPINLOCK_LOCKED (1)
+typedef int _spinlock_lock_t;
+
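The header only supplies the lock word type and its two states; callers spin on _atomic_lock() until it reports success and store _SPINLOCK_UNLOCKED to release. A hypothetical wrapper pair, purely to show how the pieces fit together (names are illustrative, not from the tree, and a real implementation would yield between attempts rather than busy-wait):

static void
spin_acquire(volatile _spinlock_lock_t *l)
{
	while (_atomic_lock(l) != 0)
		;	/* busy-wait; a real caller would yield here */
}

static void
spin_release(volatile _spinlock_lock_t *l)
{
	*l = _SPINLOCK_UNLOCKED;
}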
diff --git a/lib/libpthread/arch/mips64/uthread_machdep.c b/lib/libpthread/arch/mips64/uthread_machdep.c
new file mode 100644
index 00000000000..c2e47db96ad
--- /dev/null
+++ b/lib/libpthread/arch/mips64/uthread_machdep.c
@@ -0,0 +1,56 @@
+/* $OpenBSD: uthread_machdep.c,v 1.1 2004/08/11 17:41:34 pefo Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+/*
+ * Machine-dependent thread state functions for OpenBSD/mips
+ */
+
+#include <pthread.h>
+#include "pthread_private.h"
+
+#define ALIGNBYTES 0x3
+
+struct frame {
+ int s[9]; /* s0..s7 */
+ int _fill;
+ double f[3]; /* $f0..$f2 */
+ int t9; /* XXX only used when bootstrapping */
+ int ra;
+
+ int arg[4], cra, cfp; /* ABI space for debuggers */
+};
+
+/*
+ * Given a stack and an entry function, initialise a state
+ * structure that can be later switched to.
+ */
+void
+_thread_machdep_init(statep, base, len, entry)
+ struct _machdep_state* statep;
+ void *base;
+ int len;
+ void (*entry)(void);
+{
+ struct frame *f;
+
+ /* Locate the initial frame, aligned at the top of the stack */
+ f = (struct frame *)(((int)base + len - sizeof *f) & ~ALIGNBYTES);
+
+ f->cra = f->cfp = 0; /* for debugger */
+ f->ra = (int)entry;
+ f->t9 = (int)entry;
+
+ statep->frame = (int)f;
+}
+
+void
+_thread_machdep_save_float_state(statep)
+ struct _machdep_state* statep;
+{
+}
+
+void
+_thread_machdep_restore_float_state(statep)
+ struct _machdep_state* statep;
+{
+}
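The only non-trivial work above is placing the initial frame: it is carved off the aligned top of the new stack, and both the saved ra and t9 point at the entry function, so the first switch into this state simply "returns" into entry(). The int casts and the 0x3 alignment mask appear to be carried over from the 32-bit mips port (the commit is "mips->mips64"); a pointer-clean version of the same placement arithmetic would look roughly like this hypothetical helper, which is not part of the commit:

#include <stddef.h>
#include <stdint.h>

/*
 * Hypothetical helper: place a frame of framesz bytes at the top of the
 * stack [base, base+len), aligned down with alignmask (e.g. 0x7 or 0xf).
 */
static void *
initial_frame(void *base, size_t len, size_t framesz, uintptr_t alignmask)
{
	uintptr_t top = (uintptr_t)base + len - framesz;

	return ((void *)(top & ~alignmask));
}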
diff --git a/lib/libpthread/arch/mips64/uthread_machdep.h b/lib/libpthread/arch/mips64/uthread_machdep.h
new file mode 100644
index 00000000000..7e95f299456
--- /dev/null
+++ b/lib/libpthread/arch/mips64/uthread_machdep.h
@@ -0,0 +1,6 @@
+/* $OpenBSD: uthread_machdep.h,v 1.1 2004/08/11 17:41:34 pefo Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+struct _machdep_state {
+ int frame;
+};
diff --git a/lib/libpthread/arch/mips64/uthread_machdep_asm.S b/lib/libpthread/arch/mips64/uthread_machdep_asm.S
new file mode 100644
index 00000000000..b6e8943f336
--- /dev/null
+++ b/lib/libpthread/arch/mips64/uthread_machdep_asm.S
@@ -0,0 +1,52 @@
+/* $OpenBSD: uthread_machdep_asm.S,v 1.1 2004/08/11 17:41:34 pefo Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+#include <machine/asm.h>
+
+#define SOFF(n) ((n)*4)
+#define FPOFF(n) (SOFF(9) + 4 + (n)*8)
+#define REGOFF(n) (FPOFF(3) + (n)*4)
+
+#define FRAMESIZE (REGOFF(2) + 4*4+4+4)
+
+NON_LEAF(_thread_machdep_switch, FRAMESIZE, ra)
+ add sp, sp, -FRAMESIZE
+
+ sw s0, SOFF(0)(sp)
+ sw s1, SOFF(1)(sp)
+ sw s2, SOFF(2)(sp)
+ sw s3, SOFF(3)(sp)
+ sw s4, SOFF(4)(sp)
+ sw s5, SOFF(5)(sp)
+ sw s6, SOFF(6)(sp)
+ sw s7, SOFF(7)(sp)
+ sw s8, SOFF(8)(sp)
+ s.d $f0, FPOFF(0)(sp) /* XXX why? */
+ s.d $f2, FPOFF(1)(sp)
+ s.d $f4, FPOFF(2)(sp)
+ sw t9, REGOFF(0)(sp)
+ sw ra, REGOFF(1)(sp)
+
+ sw sp, 0(a1)
+ lw sp, 0(a0)
+
+ .set noreorder /* avoid nops */
+ lw ra, REGOFF(1)(sp)
+ lw t9, REGOFF(0)(sp)
+ l.d $f4, FPOFF(2)(sp)
+ l.d $f2, FPOFF(1)(sp)
+ l.d $f0, FPOFF(0)(sp)
+ lw s8, SOFF(8)(sp)
+ lw s7, SOFF(7)(sp)
+ lw s6, SOFF(6)(sp)
+ lw s5, SOFF(5)(sp)
+ lw s4, SOFF(4)(sp)
+ lw s3, SOFF(3)(sp)
+ lw s2, SOFF(2)(sp)
+ lw s1, SOFF(1)(sp)
+ lw s0, SOFF(0)(sp)
+ .set reorder
+
+ add sp, sp, FRAMESIZE
+ j ra
+END(_thread_machdep_switch)
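_thread_machdep_switch saves the callee-saved integer registers (s0..s8), three FP doubles ($f0/$f2/$f4, the XXX above), t9 and ra on the current stack, stores the resulting sp into the frame slot of its second argument, then loads sp from its first argument, restores the same registers and jumps through ra. From C, the call therefore reads as "switch to a0, saving into a1"; a hypothetical prototype and caller under that assumption (inferred from the register usage, not taken from the tree):

/*
 * Hypothetical prototype and caller: sp is loaded from 0(a0) and the
 * old sp is stored into 0(a1), per the asm above.
 */
void	_thread_machdep_switch(struct _machdep_state *to,
	    struct _machdep_state *from);

static void
yield_to(struct _machdep_state *me, struct _machdep_state *next)
{
	/* Execution resumes here once some thread switches back to 'me'. */
	_thread_machdep_switch(next, me);
}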