author    mpi <mpi@openbsd.org>  2017-05-12 08:47:03 +0000
committer mpi <mpi@openbsd.org>  2017-05-12 08:47:03 +0000
commit    b67e4186c4769a6a5959ad2f1df907c4c34a0c78 (patch)
tree      210460597eba920f126cd5971098e5bcfa6ca191
parent    Make atomic.h ready to be included in userland. (diff)
Make atomic.h ready to be included in userland.
- prefix the LOCK macro with an underscore
- keep setbits/clearbits and virtio barriers inside _KERNEL

ok dlg@, kettenis@
-rw-r--r--  sys/arch/amd64/include/atomic.h | 58 +++++++++++++++++++++++++++++++---------------------------
1 file changed, 31 insertions(+), 27 deletions(-)
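With this change the header's inline atomics (cas, swap, inc/dec, add/sub) become visible to userland, and the lock prefix is emitted unconditionally there: a user process cannot know at compile time whether it will run on an MP machine, so only the kernel may drop the prefix on uniprocessor builds. A minimal sketch of the intended use (illustrative only, not part of this commit; assumes the header is reachable as <machine/atomic.h>):

        #include <sys/types.h>
        #include <machine/atomic.h>

        static volatile unsigned int nevents;

        void
        record_event(void)
        {
                /* now compiles to "lock incl" even in a userland build */
                atomic_inc_int(&nevents);
        }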
diff --git a/sys/arch/amd64/include/atomic.h b/sys/arch/amd64/include/atomic.h
index 8ac08047f5b..4294ec64339 100644
--- a/sys/arch/amd64/include/atomic.h
+++ b/sys/arch/amd64/include/atomic.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: atomic.h,v 1.18 2016/08/17 11:56:42 kettenis Exp $ */
+/* $OpenBSD: atomic.h,v 1.19 2017/05/12 08:47:03 mpi Exp $ */
/* $NetBSD: atomic.h,v 1.1 2003/04/26 18:39:37 fvdl Exp $ */
/*
@@ -47,18 +47,18 @@
* void atomic_clearbits_int(volatile u_int *a, u_int mask) { *a &= ~mask; }
*/
-#if defined(_KERNEL) && !defined(_LOCORE)
+#if !defined(_LOCORE)
-#ifdef MULTIPROCESSOR
-#define LOCK "lock"
+#if defined(MULTIPROCESSOR) || !defined(_KERNEL)
+#define _LOCK "lock"
#else
-#define LOCK
+#define _LOCK
#endif
static inline unsigned int
_atomic_cas_uint(volatile unsigned int *p, unsigned int e, unsigned int n)
{
- __asm volatile(LOCK " cmpxchgl %2, %1"
+ __asm volatile(_LOCK " cmpxchgl %2, %1"
: "=a" (n), "=m" (*p)
: "r" (n), "a" (e), "m" (*p));
@@ -69,7 +69,7 @@ _atomic_cas_uint(volatile unsigned int *p, unsigned int e, unsigned int n)
static inline unsigned long
_atomic_cas_ulong(volatile unsigned long *p, unsigned long e, unsigned long n)
{
- __asm volatile(LOCK " cmpxchgq %2, %1"
+ __asm volatile(_LOCK " cmpxchgq %2, %1"
: "=a" (n), "=m" (*p)
: "r" (n), "a" (e), "m" (*p));
@@ -80,7 +80,7 @@ _atomic_cas_ulong(volatile unsigned long *p, unsigned long e, unsigned long n)
static inline void *
_atomic_cas_ptr(volatile void *p, void *e, void *n)
{
- __asm volatile(LOCK " cmpxchgq %2, %1"
+ __asm volatile(_LOCK " cmpxchgq %2, %1"
: "=a" (n), "=m" (*(unsigned long *)p)
: "r" (n), "a" (e), "m" (*(unsigned long *)p));
@@ -136,7 +136,7 @@ _atomic_swap_ptr(volatile void *p, void *n)
static inline void
_atomic_inc_int(volatile unsigned int *p)
{
- __asm volatile(LOCK " incl %0"
+ __asm volatile(_LOCK " incl %0"
: "+m" (*p));
}
#define atomic_inc_int(_p) _atomic_inc_int(_p)
@@ -144,7 +144,7 @@ _atomic_inc_int(volatile unsigned int *p)
static inline void
_atomic_inc_long(volatile unsigned long *p)
{
- __asm volatile(LOCK " incq %0"
+ __asm volatile(_LOCK " incq %0"
: "+m" (*p));
}
#define atomic_inc_long(_p) _atomic_inc_long(_p)
@@ -152,7 +152,7 @@ _atomic_inc_long(volatile unsigned long *p)
static inline void
_atomic_dec_int(volatile unsigned int *p)
{
- __asm volatile(LOCK " decl %0"
+ __asm volatile(_LOCK " decl %0"
: "+m" (*p));
}
#define atomic_dec_int(_p) _atomic_dec_int(_p)
@@ -160,7 +160,7 @@ _atomic_dec_int(volatile unsigned int *p)
static inline void
_atomic_dec_long(volatile unsigned long *p)
{
- __asm volatile(LOCK " decq %0"
+ __asm volatile(_LOCK " decq %0"
: "+m" (*p));
}
#define atomic_dec_long(_p) _atomic_dec_long(_p)
@@ -168,7 +168,7 @@ _atomic_dec_long(volatile unsigned long *p)
static inline void
_atomic_add_int(volatile unsigned int *p, unsigned int v)
{
- __asm volatile(LOCK " addl %1,%0"
+ __asm volatile(_LOCK " addl %1,%0"
: "+m" (*p)
: "a" (v));
}
@@ -177,7 +177,7 @@ _atomic_add_int(volatile unsigned int *p, unsigned int v)
static inline void
_atomic_add_long(volatile unsigned long *p, unsigned long v)
{
- __asm volatile(LOCK " addq %1,%0"
+ __asm volatile(_LOCK " addq %1,%0"
: "+m" (*p)
: "a" (v));
}
@@ -186,7 +186,7 @@ _atomic_add_long(volatile unsigned long *p, unsigned long v)
static inline void
_atomic_sub_int(volatile unsigned int *p, unsigned int v)
{
- __asm volatile(LOCK " subl %1,%0"
+ __asm volatile(_LOCK " subl %1,%0"
: "+m" (*p)
: "a" (v));
}
@@ -195,7 +195,7 @@ _atomic_sub_int(volatile unsigned int *p, unsigned int v)
static inline void
_atomic_sub_long(volatile unsigned long *p, unsigned long v)
{
- __asm volatile(LOCK " subq %1,%0"
+ __asm volatile(_LOCK " subq %1,%0"
: "+m" (*p)
: "a" (v));
}
@@ -207,7 +207,7 @@ _atomic_add_int_nv(volatile unsigned int *p, unsigned int v)
{
unsigned int rv = v;
- __asm volatile(LOCK " xaddl %0,%1"
+ __asm volatile(_LOCK " xaddl %0,%1"
: "+a" (rv), "+m" (*p));
return (rv + v);
@@ -219,7 +219,7 @@ _atomic_add_long_nv(volatile unsigned long *p, unsigned long v)
{
unsigned long rv = v;
- __asm volatile(LOCK " xaddq %0,%1"
+ __asm volatile(_LOCK " xaddq %0,%1"
: "+a" (rv), "+m" (*p));
return (rv + v);
@@ -231,7 +231,7 @@ _atomic_sub_int_nv(volatile unsigned int *p, unsigned int v)
{
unsigned int rv = 0 - v;
- __asm volatile(LOCK " xaddl %0,%1"
+ __asm volatile(_LOCK " xaddl %0,%1"
: "+a" (rv), "+m" (*p));
return (rv - v);
@@ -243,7 +243,7 @@ _atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
{
unsigned long rv = 0 - v;
- __asm volatile(LOCK " xaddq %0,%1"
+ __asm volatile(_LOCK " xaddq %0,%1"
: "+a" (rv), "+m" (*p));
return (rv - v);
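xaddl/xaddq atomically exchange the register with the memory operand and then add them, so afterwards rv ("+a") holds the value *p had before the operation and rv + v is the post-add value. Subtraction reuses the same instruction by seeding rv with 0 - v; the old value then lands in rv, and rv - v is the new value. These _nv ("new value") variants are what reference counting wants; a sketch, assuming the atomic_sub_int_nv() wrapper this header provides (struct obj and obj_free() are hypothetical):

        struct obj {
                volatile unsigned int refcnt;
                /* ... */
        };

        void obj_free(struct obj *);

        void
        obj_release(struct obj *o)
        {
                /* atomic_sub_int_nv() returns the value after the decrement */
                if (atomic_sub_int_nv(&o->refcnt, 1) == 0)
                        obj_free(o);
        }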
@@ -262,7 +262,7 @@ _atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
-#ifdef MULTIPROCESSOR
+#if defined(MULTIPROCESSOR) || !defined(_KERNEL)
#define membar_enter() __membar("mfence")
#define membar_exit() __membar("")
#define membar_producer() __membar("")
@@ -276,6 +276,8 @@ _atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
#define membar_sync() __membar("")
#endif
+#ifdef _KERNEL
+
/* virtio needs MP membars even on SP kernels */
#define virtio_membar_producer() __membar("")
#define virtio_membar_consumer() __membar("")
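amd64 is strongly ordered (TSO): loads are not reordered against loads and stores are not reordered against stores, so membar_producer() and membar_consumer() only need to stop compiler reordering (an empty asm with a "memory" clobber), while membar_enter() needs a real mfence because a store can still be reordered after a later load. After this change userland always gets the MP definitions, and the virtio barriers stay kernel-only because a virtio host may run the device on another CPU even under an SP kernel. A sketch of the classic producer/consumer handoff these barriers express (data and ready are hypothetical):

        static unsigned int data;
        static volatile unsigned int ready;

        void
        produce(void)
        {
                data = 42;
                membar_producer();      /* data store visible before flag store */
                ready = 1;
        }

        unsigned int
        consume(void)
        {
                while (ready == 0)
                        ;               /* spin until the producer publishes */
                membar_consumer();      /* flag load ordered before data load */
                return (data);
        }

On amd64 both barriers cost nothing at run time, but the pattern keeps the code correct on more weakly ordered architectures.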
@@ -284,25 +286,25 @@ _atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
static __inline void
x86_atomic_setbits_u32(volatile u_int32_t *ptr, u_int32_t bits)
{
- __asm volatile(LOCK " orl %1,%0" : "=m" (*ptr) : "ir" (bits));
+ __asm volatile(_LOCK " orl %1,%0" : "=m" (*ptr) : "ir" (bits));
}
static __inline void
x86_atomic_clearbits_u32(volatile u_int32_t *ptr, u_int32_t bits)
{
- __asm volatile(LOCK " andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
+ __asm volatile(_LOCK " andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
}
static __inline void
x86_atomic_setbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
{
- __asm volatile(LOCK " orq %1,%0" : "=m" (*ptr) : "er" (bits));
+ __asm volatile(_LOCK " orq %1,%0" : "=m" (*ptr) : "er" (bits));
}
static __inline void
x86_atomic_clearbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
{
- __asm volatile(LOCK " andq %1,%0" : "=m" (*ptr) : "er" (~bits));
+ __asm volatile(_LOCK " andq %1,%0" : "=m" (*ptr) : "er" (~bits));
}
#define x86_atomic_testset_ul x86_atomic_testset_u64
@@ -312,7 +314,9 @@ x86_atomic_clearbits_u64(volatile u_int64_t *ptr, u_int64_t bits)
#define atomic_setbits_int x86_atomic_setbits_u32
#define atomic_clearbits_int x86_atomic_clearbits_u32
-#undef LOCK
+#endif /* _KERNEL */
+
+#undef _LOCK
-#endif /* defined(_KERNEL) && !defined(_LOCORE) */
+#endif /* !defined(_LOCORE) */
#endif /* _MACHINE_ATOMIC_H_ */
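The second half of the change keeps the bit-set/clear helpers (lock orl, and lock andl with the complemented mask) and the virtio barriers behind #ifdef _KERNEL, so only those disappear from userland builds. A sketch of the kernel-side idiom that stays available (struct softc, sc_flags and SCF_BUSY are hypothetical):

        #define SCF_BUSY        0x0001

        struct softc {
                volatile unsigned int sc_flags;
        };

        void
        mark_busy(struct softc *sc)
        {
                /* lock orl: one atomic read-modify-write of the flag word */
                atomic_setbits_int(&sc->sc_flags, SCF_BUSY);
        }

Userland code that needs the same effect can build it from the still-visible atomic_cas_uint() loop instead.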