Diffstat (limited to 'arch/alpha/include')
 arch/alpha/include/asm/atomic.h      | 13 +++++++++++++
 arch/alpha/include/asm/dma-mapping.h |  2 --
 arch/alpha/include/asm/floppy.h      |  2 --
 arch/alpha/include/asm/rwsem.h       | 21 ++++++++++++++++++---
 arch/alpha/include/asm/spinlock.h    | 14 --------------
 5 files changed, 31 insertions(+), 21 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 85867d3cea64..767bfdd42992 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,6 +14,15 @@
* than regular operations.
*/
+/*
+ * To ensure dependency ordering is preserved for the _relaxed and
+ * _release atomics, an smp_read_barrier_depends() is unconditionally
+ * inserted into the _relaxed variants, which are used to build the
+ * barriered versions. To avoid redundant back-to-back fences, we can
+ * define the _acquire and _fence versions explicitly.
+ */
+#define __atomic_op_acquire(op, args...) op##_relaxed(args)
+#define __atomic_op_fence __atomic_op_release
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -61,6 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -78,6 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -112,6 +123,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -129,6 +141,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
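The comment added at the top of atomic.h is the key to this hunk: the generic atomic layer composes the _acquire, _release and fully ordered forms out of the _relaxed one, so once every Alpha _relaxed op ends in smp_read_barrier_depends(), the _acquire form needs no extra fence and the _fence form can reuse __atomic_op_release. Below is a rough user-space analogue of that composition, written with C11 atomics purely for illustration; the helper names are not the kernel API, and the acquire shortcut models Alpha's semantics rather than portable C11.

#include <stdatomic.h>
#include <stdio.h>

/* Relaxed fetch-add: no ordering beyond the RMW itself.  The patch
 * above makes Alpha's equivalent end with a dependency barrier. */
static int fetch_add_relaxed(atomic_int *v, int i)
{
	return atomic_fetch_add_explicit(v, i, memory_order_relaxed);
}

/* Acquire form.  Portable code would follow the relaxed op with
 * atomic_thread_fence(memory_order_acquire); the patch instead defines
 * __atomic_op_acquire() as the bare _relaxed op, because on Alpha the
 * relaxed op's trailing dependency barrier already provides the
 * required ordering.  That shortcut is modeled here, not portable. */
static int fetch_add_acquire(atomic_int *v, int i)
{
	return fetch_add_relaxed(v, i);
}

/* Fully ordered form: a leading release fence plus the relaxed op.
 * The trailing half again comes from the op itself on Alpha, which is
 * why __atomic_op_fence can simply alias __atomic_op_release there. */
static int fetch_add_fence(atomic_int *v, int i)
{
	atomic_thread_fence(memory_order_release);
	return fetch_add_relaxed(v, i);
}

int main(void)
{
	atomic_int v = 0;
	fetch_add_relaxed(&v, 1);
	fetch_add_acquire(&v, 1);
	int old = fetch_add_fence(&v, 1);
	printf("old=%d final=%d\n", old, atomic_load(&v));
	return 0;
}

Any C11 compiler will build this and print old=2 final=3; the point is only the shape of the composition, not the ordering guarantees on a given host.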
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index e542cb272b67..b78f61f20796 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -9,6 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-#define dma_cache_sync(dev, va, size, dir) ((void)0)
-
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index bae97eb19d26..942924756cf2 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -24,7 +24,6 @@
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
0, "floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
@@ -62,7 +61,6 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
prev_dir = dir;
fd_clear_dma_ff();
- fd_cacheflush(addr, size);
fd_set_dma_mode(mode);
set_dma_addr(FLOPPY_DMA, bus_addr);
fd_set_dma_count(size);
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 3925f06afd6b..cf8fc8f9a2ed 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -22,7 +22,7 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int ___down_read(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -42,10 +42,24 @@ static inline void __down_read(struct rw_semaphore *sem)
:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
- if (unlikely(oldcount < 0))
+ return (oldcount < 0);
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(___down_read(sem)))
rwsem_down_read_failed(sem);
}
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (unlikely(___down_read(sem)))
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+
+ return 0;
+}
+
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
@@ -95,9 +109,10 @@ static inline void __down_write(struct rw_semaphore *sem)
static inline int __down_write_killable(struct rw_semaphore *sem)
{
- if (unlikely(___down_write(sem)))
+ if (unlikely(___down_write(sem))) {
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
return -EINTR;
+ }
return 0;
}
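The rwsem change is a straightforward factoring: the old __down_read() fast path becomes ___down_read(), which now reports contention instead of handling it, and the new __down_read_killable() reuses it, mapping a killed sleep in the slow path to -EINTR via IS_ERR(). Here is a minimal sketch of that shape with stub slow paths; these are hypothetical stand-ins, not the kernel implementation, and a GCC builtin replaces the Alpha asm.

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the kernel pieces, only to show the fast/slow split. */
struct rw_semaphore { long count; };
#define RWSEM_ACTIVE_READ_BIAS 1L

/* Fast path, as factored out into ___down_read() above: atomically add
 * the read bias and report whether the count went negative, meaning a
 * writer is active or waiting and the slow path is required. */
static int fast_down_read(struct rw_semaphore *sem)
{
	long oldcount = __atomic_fetch_add(&sem->count,
					   RWSEM_ACTIVE_READ_BIAS,
					   __ATOMIC_SEQ_CST);
	return oldcount < 0;
}

/* Stub standing in for rwsem_down_read_failed_killable(), which in the
 * kernel returns the sem on success or an ERR_PTR() when the sleeping
 * task is killed.  NULL models the error pointer here. */
static struct rw_semaphore *slow_down_read_killable(struct rw_semaphore *sem)
{
	return sem;
}

/* Shape of the new __down_read_killable(): try the fast path, fall
 * back to the killable slow path, map a failed sleep to -EINTR. */
static int down_read_killable_sketch(struct rw_semaphore *sem)
{
	if (fast_down_read(sem))
		if (slow_down_read_killable(sem) == NULL)
			return -EINTR;
	return 0;
}

int main(void)
{
	struct rw_semaphore sem = { 0 };
	printf("down_read_killable -> %d\n", down_read_killable_sketch(&sem));
	return 0;
}

The braces added to __down_write_killable() in the last hunk are purely cosmetic; the nested if already bound correctly.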
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aa4304afbea6..1221cbb86a6f 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -14,7 +14,6 @@
* We make no fairness assumptions. They have a cost.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -55,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
/***********************************************************/
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
- return (lock->lock & 1) == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
- return lock->lock == 0;
-}
-
static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -171,7 +160,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
lock->lock = 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ALPHA_SPINLOCK_H */
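The deleted arch_read_can_lock()/arch_write_can_lock() helpers remain a useful record of Alpha's rwlock word: bit 0 is set while a writer holds the lock, and, judging from the asm elsewhere in this file (not shown in these hunks), each reader adds 2, so the reader count lives in the remaining bits. A small self-checking sketch of that encoding, using plain longs since the predicates are gone from the kernel; the reader-increment of 2 is an assumption about the unchanged asm, not something this diff shows.

#include <assert.h>
#include <stdio.h>

/* The Alpha rwlock word as the removed predicates describe it:
 * bit 0 set while a writer holds the lock; readers assumed to add 2,
 * keeping their count in the remaining bits. */
struct arch_rwlock { long lock; };

/* The removed helpers, reproduced only to illustrate the encoding the
 * generic locking code now handles without per-arch hooks. */
static int read_can_lock(struct arch_rwlock *l)
{
	return (l->lock & 1) == 0;	/* no writer => readers may enter */
}

static int write_can_lock(struct arch_rwlock *l)
{
	return l->lock == 0;		/* no writer and no readers */
}

int main(void)
{
	struct arch_rwlock l = { 0 };
	assert(read_can_lock(&l) && write_can_lock(&l));

	l.lock += 2;			/* one reader holds the lock */
	assert(read_can_lock(&l) && !write_can_lock(&l));

	l.lock = 1;			/* a writer holds the lock */
	assert(!read_can_lock(&l) && !write_can_lock(&l));

	puts("encoding checks pass");
	return 0;
}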