Diffstat (limited to 'arch/metag/include')
-rw-r--r--  arch/metag/include/asm/atomic_lock1.h | 2 ++
-rw-r--r--  arch/metag/include/asm/dma-mapping.h  | 2 +-
-rw-r--r--  arch/metag/include/asm/processor.h    | 3 ---
-rw-r--r--  arch/metag/include/asm/spinlock.h     | 5 -----
-rw-r--r--  arch/metag/include/asm/topology.h     | 1 -
5 files changed, 3 insertions(+), 10 deletions(-)
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index 6c1380a8a0d4..eee779f26cc4 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -37,6 +37,8 @@ static inline int atomic_set(atomic_t *v, int i)
 	return i;
 }
 
+#define atomic_set_release(v, i) atomic_set((v), (i))
+
 #define ATOMIC_OP(op, c_op) \
 static inline void atomic_##op(int i, atomic_t *v) \
 { \
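
The new atomic_set_release() maps directly onto atomic_set(); on this lock-based (lock1) variant the locked atomic_set() presumably already provides the atomicity and ordering a release store needs, so no separate implementation is required. As a rough illustration of the release/acquire publication pattern such a helper supports, here is a userspace C11 sketch (illustrative only, not the kernel API; all names are invented):

#include <stdatomic.h>

static int payload;		/* data written before publication */
static atomic_int ready;	/* publication flag */

static void publish(void)
{
	payload = 42;						/* plain store */
	atomic_store_explicit(&ready, 1, memory_order_release);	/* release: payload ordered before the flag */
}

static int consume(void)
{
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;		/* spin until the flag is seen */
	return payload;		/* observes 42 once ready != 0 */
}
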
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
index fad3dc3cb210..ea573be2b6d0 100644
--- a/arch/metag/include/asm/dma-mapping.h
+++ b/arch/metag/include/asm/dma-mapping.h
@@ -9,7 +9,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 }
 
 /*
- * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+ * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
  * do any flushing here.
  */
 static inline void
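
For context, the comment now refers to dma_alloc_attrs(), the interface that replaced the old dma_alloc_noncoherent() wording. A minimal hypothetical caller might look roughly like this (the helper name and driver context are invented; only the dma_alloc_attrs() signature is real):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical helper: allocate a DMA buffer via dma_alloc_attrs().
 * On metag the returned memory is non-cacheable, so no explicit cache
 * flushing is needed around device access. */
static void *example_dma_buf_alloc(struct device *dev, size_t size,
				   dma_addr_t *dma_handle)
{
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, 0);
}
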
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index ec6a49076980..8ae92d6abfd2 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -131,9 +131,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-#define copy_segments(tsk, mm) do { } while (0)
-#define release_segments(mm) do { } while (0)
-
 /*
  * Return saved PC of a blocked thread.
  */
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
index c0c7a22be1ae..ddf7fe5708a6 100644
--- a/arch/metag/include/asm/spinlock.h
+++ b/arch/metag/include/asm/spinlock.h
@@ -15,11 +15,6 @@
  * locked.
  */
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
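
The removed arch_spin_unlock_wait() only spun until the lock word read zero, taking that value with acquire ordering, which is what smp_cond_load_acquire(&lock->lock, !VAL) amounts to conceptually. A rough userspace C11 equivalent, for illustration only (the type and function names are invented):

#include <stdatomic.h>

/* Spin until *lock_word is observed as 0 (unlocked); the acquire load
 * orders everything after this call against the release that cleared it. */
static void spin_until_unlocked(const atomic_uint *lock_word)
{
	while (atomic_load_explicit(lock_word, memory_order_acquire) != 0)
		;	/* busy-wait */
}
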
diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h
index e95f874ded1b..707c7f7b6bea 100644
--- a/arch/metag/include/asm/topology.h
+++ b/arch/metag/include/asm/topology.h
@@ -4,7 +4,6 @@
 #ifdef CONFIG_NUMA
 
 #define cpu_to_node(cpu) ((void)(cpu), 0)
-#define parent_node(node) ((void)(node), 0)
 
 #define cpumask_of_node(node) ((void)node, cpu_online_mask)
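
With the metag-specific parent_node() gone, the generic fallback in asm-generic/topology.h takes over; it is guarded so an architecture can still override it, roughly:

#ifndef parent_node
#define parent_node(node)	((void)(node), 0)
#endif
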