Diffstat (limited to 'arch/alpha/include/asm')

 arch/alpha/include/asm/Kbuild           |  1
 arch/alpha/include/asm/asm-prototypes.h |  2
 arch/alpha/include/asm/atomic.h         | 13
 arch/alpha/include/asm/cmpxchg.h        |  6
 arch/alpha/include/asm/dma-mapping.h    |  2
 arch/alpha/include/asm/floppy.h         |  2
 arch/alpha/include/asm/futex.h          |  8
 arch/alpha/include/asm/pci.h            |  5
 arch/alpha/include/asm/processor.h      |  5
 arch/alpha/include/asm/rwsem.h          | 21
 arch/alpha/include/asm/spinlock.h       | 14
 arch/alpha/include/asm/thread_info.h    |  3
 arch/alpha/include/asm/xchg.h           | 38
 13 files changed, 63 insertions(+), 57 deletions(-)
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 47f3fba3e41f..9b68790013e2 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-generic-y += clkdev.h
generic-y += exec.h
generic-y += export.h
generic-y += fb.h
diff --git a/arch/alpha/include/asm/asm-prototypes.h b/arch/alpha/include/asm/asm-prototypes.h
index d12c68ea340b..b34cc1f06720 100644
--- a/arch/alpha/include/asm/asm-prototypes.h
+++ b/arch/alpha/include/asm/asm-prototypes.h
@@ -4,7 +4,7 @@
#include <asm/console.h>
#include <asm/page.h>
#include <asm/string.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm-generic/asm-prototypes.h>
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 85867d3cea64..767bfdd42992 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,6 +14,15 @@
* than regular operations.
*/
+/*
+ * To ensure dependency ordering is preserved for the _relaxed and
+ * _release atomics, an smp_read_barrier_depends() is unconditionally
+ * inserted into the _relaxed variants, which are used to build the
+ * barriered versions. To avoid redundant back-to-back fences, we can
+ * define the _acquire and _fence versions explicitly.
+ */
+#define __atomic_op_acquire(op, args...) op##_relaxed(args)
+#define __atomic_op_fence __atomic_op_release
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -61,6 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -78,6 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -112,6 +123,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -129,6 +141,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
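
For reference, the two overrides added above replace the generic wrappers in include/linux/atomic.h, which build the ordered atomics from the _relaxed ones roughly as follows (paraphrased, not verbatim kernel source). Because smp_read_barrier_depends() expands to a full mb on Alpha, every _relaxed op above already ends with a full barrier, so the generic acquire wrapper would emit two back-to-back fences:

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();	/* redundant on Alpha: _relaxed	*/	\
	__ret;			/* already ends in a full mb	*/	\
})

Hence the patch defines _acquire as plain _relaxed (its trailing barrier already orders later accesses) and _fence as _release (the leading barrier from the release wrapper plus the trailing barrier inside _relaxed yields full ordering).
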
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e..8a2b331e43fe 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
* Atomic exchange routines.
*/
-#define __ASM__MB
#define ____xchg(type, args...) __xchg ## type ## _local(args)
#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
#include <asm/xchg.h>
@@ -33,10 +32,6 @@
cmpxchg_local((ptr), (o), (n)); \
})
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB "\tmb\n"
-#endif
#undef ____xchg
#undef ____cmpxchg
#define ____xchg(type, args...) __xchg ##type(args)
@@ -64,7 +59,6 @@
cmpxchg((ptr), (o), (n)); \
})
-#undef __ASM__MB
#undef ____cmpxchg
#endif /* _ALPHA_CMPXCHG_H */
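
The __ASM__MB token removed above was the previous barrier mechanism: cmpxchg.h defined it empty for the _local variants, redefined it to "\tmb\n" under CONFIG_SMP, and xchg.h pasted it into each asm body right after the store-conditional. The xchg.h hunks later in this diff replace that conditional string-pasting with explicit smp_mb() calls bracketing the asm, in this shape (a condensed sketch mirroring the _u64 hunk below, not a new primitive):

	smp_mb();		/* leading barrier: orders earlier accesses */
	__asm__ __volatile__(
	"1:	ldq_l	%0,%4\n"
	"	bis	$31,%3,%1\n"
	"	stq_c	%1,%2\n"	/* no mb pasted after the store */
	"	beq	%1,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");
	smp_mb();		/* trailing barrier: orders later accesses */

Besides adding the leading barrier, this moves the trailing barrier off the success-only path inside the asm, which the updated cmpxchg comment in the xchg.h hunks below explains is needed to preserve dependency ordering after an unsuccessful operation.
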
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index e542cb272b67..b78f61f20796 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -9,6 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-#define dma_cache_sync(dev, va, size, dir) ((void)0)
-
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index bae97eb19d26..942924756cf2 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -24,7 +24,6 @@
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
0, "floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
@@ -62,7 +61,6 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
prev_dir = dir;
fd_clear_dma_ff();
- fd_cacheflush(addr, size);
fd_set_dma_mode(mode);
set_dma_addr(FLOPPY_DMA, bus_addr);
fd_set_dma_count(size);
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index d2e4da93e68c..ca3322536f72 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -20,8 +20,8 @@
"3: .subsection 2\n" \
"4: br 1b\n" \
" .previous\n" \
- EXC(1b,3b,%1,$31) \
- EXC(2b,3b,%1,$31) \
+ EXC(1b,3b,$31,%1) \
+ EXC(2b,3b,$31,%1) \
: "=&r" (oldval), "=&r"(ret) \
: "r" (uaddr), "r"(oparg) \
: "memory")
@@ -82,8 +82,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"3: .subsection 2\n"
"4: br 1b\n"
" .previous\n"
- EXC(1b,3b,%0,$31)
- EXC(2b,3b,%0,$31)
+ EXC(1b,3b,$31,%0)
+ EXC(2b,3b,$31,%0)
: "+r"(ret), "=&r"(prev), "=&r"(cmp)
: "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
: "memory");
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index fc988c16e894..b9ec55351924 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -13,9 +13,6 @@
* The following structure is used to manage multiple PCI busses.
*/
-struct pci_dev;
-struct pci_bus;
-struct resource;
struct pci_iommu_arena;
struct page;
@@ -57,8 +54,6 @@ struct pci_controller {
#define PCIBIOS_MIN_IO alpha_mv.min_io_address
#define PCIBIOS_MIN_MEM alpha_mv.min_mem_address
-extern void pcibios_set_master(struct pci_dev *dev);
-
/* IOMMU controls. */
/* The PCI address space does not equal the physical memory address space.
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index bfe784f2d4af..cb05d045efe3 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -40,15 +40,12 @@ typedef struct {
struct thread_struct { };
#define INIT_THREAD { }
-/* Return saved PC of a blocked thread. */
-struct task_struct;
-extern unsigned long thread_saved_pc(struct task_struct *);
-
/* Do necessary setup to start up a newly executed thread. */
struct pt_regs;
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
/* Free all resources held by a thread. */
+struct task_struct;
extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 3925f06afd6b..cf8fc8f9a2ed 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -22,7 +22,7 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int ___down_read(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -42,10 +42,24 @@ static inline void __down_read(struct rw_semaphore *sem)
:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
- if (unlikely(oldcount < 0))
+ return (oldcount < 0);
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(___down_read(sem)))
rwsem_down_read_failed(sem);
}
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (unlikely(___down_read(sem)))
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+
+ return 0;
+}
+
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
@@ -95,9 +109,10 @@ static inline void __down_write(struct rw_semaphore *sem)
static inline int __down_write_killable(struct rw_semaphore *sem)
{
- if (unlikely(___down_write(sem)))
+ if (unlikely(___down_write(sem))) {
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
return -EINTR;
+ }
return 0;
}
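
A sketch of the caller one level up, modeled on the existing down_write_killable() path in kernel/locking/rwsem.c (condensed and hypothetical, not part of this patch): the killable variant lets a fatal signal abort the slow path with -EINTR instead of sleeping until the lock is granted.

	int __sched down_read_killable(struct rw_semaphore *sem)
	{
		might_sleep();
		rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

		if (__down_read_killable(sem)) {	/* -EINTR on fatal signal */
			rwsem_release(&sem->dep_map, 1, _RET_IP_);
			return -EINTR;
		}

		rwsem_set_reader_owned(sem);
		return 0;
	}
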
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aa4304afbea6..1221cbb86a6f 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -14,7 +14,6 @@
* We make no fairness assumptions. They have a cost.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -55,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
/***********************************************************/
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
- return (lock->lock & 1) == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
- return lock->lock == 0;
-}
-
static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -171,7 +160,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
lock->lock = 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 8c20c5e35432..807d7b9a1860 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -39,9 +39,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("$8");
#define current_thread_info() __current_thread_info
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 68dfb3cb7145..e2b59fac5257 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -12,6 +12,10 @@
* Atomic exchange.
* Since it can be used to implement critical sections
* it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
*/
static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
{
unsigned long ret, tmp, addr64;
+ smp_mb();
__asm__ __volatile__(
" andnot %4,7,%3\n"
" insbl %1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
" or %1,%2,%2\n"
" stq_c %2,0(%3)\n"
" beq %2,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
+ smp_mb();
return ret;
}
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
{
unsigned long ret, tmp, addr64;
+ smp_mb();
__asm__ __volatile__(
" andnot %4,7,%3\n"
" inswl %1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
" or %1,%2,%2\n"
" stq_c %2,0(%3)\n"
" beq %2,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
: "r" ((long)m), "1" (val) : "memory");
+ smp_mb();
return ret;
}
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
{
unsigned long dummy;
+ smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%4\n"
" bis $31,%3,%1\n"
" stl_c %1,%2\n"
" beq %1,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
+ smp_mb();
return val;
}
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
{
unsigned long dummy;
+ smp_mb();
__asm__ __volatile__(
"1: ldq_l %0,%4\n"
" bis $31,%3,%1\n"
" stq_c %1,%2\n"
" beq %1,2f\n"
- __ASM__MB
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (val), "=&r" (dummy), "=m" (*m)
: "rI" (val), "m" (*m) : "memory");
+ smp_mb();
return val;
}
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
*/
static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
{
unsigned long prev, tmp, cmp, addr64;
+ smp_mb();
__asm__ __volatile__(
" andnot %5,7,%4\n"
" insbl %1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
" or %1,%2,%2\n"
" stq_c %2,0(%4)\n"
" beq %2,3f\n"
- __ASM__MB
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+ smp_mb();
return prev;
}
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
{
unsigned long prev, tmp, cmp, addr64;
+ smp_mb();
__asm__ __volatile__(
" andnot %5,7,%4\n"
" inswl %1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
" or %1,%2,%2\n"
" stq_c %2,0(%4)\n"
" beq %2,3f\n"
- __ASM__MB
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+ smp_mb();
return prev;
}
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
{
unsigned long prev, cmp;
+ smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%5\n"
" cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
" mov %4,%1\n"
" stl_c %1,%2\n"
" beq %1,3f\n"
- __ASM__MB
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
+ smp_mb();
return prev;
}
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
{
unsigned long prev, cmp;
+ smp_mb();
__asm__ __volatile__(
"1: ldq_l %0,%5\n"
" cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
" mov %4,%1\n"
" stq_c %1,%2\n"
" beq %1,3f\n"
- __ASM__MB
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r"(prev), "=&r"(cmp), "=m"(*m)
: "r"((long) old), "r"(new), "m"(*m) : "memory");
+ smp_mb();
return prev;
}
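
To see why the trailing barrier must fire even when the comparison fails, consider a reader chaining a dependent load off the value returned by an unsuccessful cmpxchg (an illustrative pattern, not taken from this patch; struct obj and pp are hypothetical). A writer publishes an object with

	obj->data = 1;
	smp_wmb();
	WRITE_ONCE(*pp, obj);

and the reader uses a deliberately failing cmpxchg() to fetch the pointer:

	static int read_published(struct obj **pp)
	{
		struct obj *old = cmpxchg(pp, NULL, NULL);	/* fails once *pp is set */

		return old ? READ_ONCE(old->data) : -1;		/* load depends on 'old' */
	}

On Alpha, even an address-dependent load can observe stale data without a barrier. Tying the mb to a successful stl_c/stq_c would leave this failure path unordered; the unconditional smp_mb() after the asm orders the dependent load regardless of the outcome.
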