author		David Howells <dhowells@redhat.com>	2012-03-28 18:30:03 +0100
committer	David Howells <dhowells@redhat.com>	2012-03-28 18:30:03 +0100
commit		e839ca528718e68cad32a307dc9aabf01ef3eb05 (patch)
tree		5ceb6ece0688455f7205739b71e4b6e04afd2988 /arch/sh/include/asm/barrier.h
parent		Disintegrate asm/system.h for Score (diff)
Disintegrate asm/system.h for SH
Disintegrate asm/system.h for SH.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-sh@vger.kernel.org
Diffstat (limited to 'arch/sh/include/asm/barrier.h')
-rw-r--r--	arch/sh/include/asm/barrier.h	54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
new file mode 100644
index 000000000000..72c103dae300
--- /dev/null
+++ b/arch/sh/include/asm/barrier.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 2002 Paul Mundt
+ */
+#ifndef __ASM_SH_BARRIER_H
+#define __ASM_SH_BARRIER_H
+
+#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+#include <asm/cache_insns.h>
+#endif
+
+/*
+ * A brief note on ctrl_barrier(), the control register write barrier.
+ *
+ * Legacy SH cores typically require a sequence of 8 nops after
+ * modification of a control register in order for the changes to take
+ * effect. On newer cores (like the sh4a and sh5) this is accomplished
+ * with icbi.
+ *
+ * Also note that on sh4a in the icbi case we can forego a synco for the
+ * write barrier, as it's not necessary for control registers.
+ *
+ * Historically we have only done this type of barrier for the MMUCR, but
+ * it's also necessary for the CCR, so we make it generic here instead.
+ */
+#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+#define mb() __asm__ __volatile__ ("synco": : :"memory")
+#define rmb() mb()
+#define wmb() __asm__ __volatile__ ("synco": : :"memory")
+#define ctrl_barrier() __icbi(PAGE_OFFSET)
+#define read_barrier_depends() do { } while(0)
+#else
+#define mb() __asm__ __volatile__ ("": : :"memory")
+#define rmb() mb()
+#define wmb() __asm__ __volatile__ ("": : :"memory")
+#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
+#define read_barrier_depends() do { } while(0)
+#endif
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#endif
+
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#endif /* __ASM_SH_BARRIER_H */
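
The block comment in the new header explains why ctrl_barrier() exists: on legacy SH parts a control-register write only takes effect after a run of eight nops, while SH-4A/SH-5 achieve the same thing with an icbi. A minimal sketch of the intended call pattern follows; the SH_CCR address macro and the __set_cache_mode() helper are hypothetical names used only for illustration and are not part of this patch.

/*
 * Hypothetical illustration of the pattern ctrl_barrier() is meant for:
 * modify a control register, then issue the barrier so the change takes
 * effect before anything that depends on it.  SH_CCR and
 * __set_cache_mode() are made-up names for this sketch.
 */
static void __set_cache_mode(unsigned long ccr)
{
	__raw_writel(ccr, SH_CCR);	/* write the control register        */
	ctrl_barrier();			/* 8 nops on legacy SH,              */
					/* __icbi(PAGE_OFFSET) on SH-4A/SH-5 */
}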
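The mb()/rmb()/wmb() macros and their smp_*() variants follow the usual Linux pairing rules: on SH-4A/SH-5 they emit a synco, elsewhere they reduce to compiler barriers. A rough producer/consumer sketch of how they pair is shown below; shared_data, data_ready and the two functions are hypothetical, and only the barrier macros come from this header.

/*
 * Hypothetical producer/consumer pairing; only smp_wmb()/smp_rmb()
 * come from the header added by this patch.
 */
static int shared_data;
static int data_ready;

static void producer(void)
{
	shared_data = 42;	/* write the payload first           */
	smp_wmb();		/* order the payload before the flag */
	data_ready = 1;		/* then publish the flag             */
}

static void consumer(void)
{
	if (data_ready) {	/* observe the flag...               */
		smp_rmb();	/* ...and order it before the payload */
		printk(KERN_INFO "shared_data = %d\n", shared_data);
	}
}

The set_mb() definition at the end of the file serves a related purpose: by routing the store through xchg() it makes the assignment a full barrier, which is the behaviour store-then-check sequences (for example sleep/wakeup paths) rely on.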