Diffstat (limited to 'include/asm-mips')
-rw-r--r--  include/asm-mips/atomic.h                          |  57
-rw-r--r--  include/asm-mips/bootinfo.h                        |   6
-rw-r--r--  include/asm-mips/fpu.h                             |  23
-rw-r--r--  include/asm-mips/hazards.h                         |  32
-rw-r--r--  include/asm-mips/highmem.h                         |  42
-rw-r--r--  include/asm-mips/kdebug.h                          |   1
-rw-r--r--  include/asm-mips/kexec.h                           |   2
-rw-r--r--  include/asm-mips/local.h                           | 304
-rw-r--r--  include/asm-mips/mach-au1x00/au1550_spi.h          |  16
-rw-r--r--  include/asm-mips/mach-cobalt/cobalt.h              |   2
-rw-r--r--  include/asm-mips/mach-ja/cpu-feature-overrides.h   |  45
-rw-r--r--  include/asm-mips/mach-ja/spaces.h                  |  20
-rw-r--r--  include/asm-mips/mips-boards/malta.h               |   4
-rw-r--r--  include/asm-mips/mmu_context.h                     |   1
-rw-r--r--  include/asm-mips/msc01_ic.h                        |   5
-rw-r--r--  include/asm-mips/page.h                            |   4
-rw-r--r--  include/asm-mips/pgalloc.h                         |   1
-rw-r--r--  include/asm-mips/pgtable.h                         |   4
-rw-r--r--  include/asm-mips/poll.h                            |  21
-rw-r--r--  include/asm-mips/scatterlist.h                     |   2
-rw-r--r--  include/asm-mips/serial.h                          |  41
-rw-r--r--  include/asm-mips/sgi/seeq.h                        |  21
-rw-r--r--  include/asm-mips/sgi/wd.h                          |  20
-rw-r--r--  include/asm-mips/system.h                          | 133
24 files changed, 550 insertions(+), 257 deletions(-)
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 1ac50b6c47ad..1b60624dab7e 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -18,6 +18,7 @@
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
+#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
@@ -306,8 +307,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
return result;
}
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_add_unless - add unless the number is a given value
@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
@@ -681,6 +688,36 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
return result;
}
+#define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))
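
For context, a minimal usage sketch of the helpers this hunk adds or reworks; the structure and field names below are made up, not taken from the kernel.

/*
 * Usage sketch only -- not part of this patch.  "struct obj" and its
 * refcount field are hypothetical; the point is the pattern the inline
 * atomic_add_unless()/atomic_inc_not_zero() (and the new atomic64
 * counterparts) support: take a reference only if the object is still live.
 */
#include <asm/atomic.h>

struct obj {
	atomic_t refcnt;
};

static struct obj *obj_get(struct obj *o)
{
	/* returns non-zero iff refcnt was not zero and is now incremented */
	if (!atomic_inc_not_zero(&o->refcnt))
		return NULL;		/* already being torn down */
	return o;
}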
diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h
index c7c945baf1ee..b0c329783ac5 100644
--- a/include/asm-mips/bootinfo.h
+++ b/include/asm-mips/bootinfo.h
@@ -119,9 +119,9 @@
*/
#define MACH_GROUP_MOMENCO 12 /* Momentum Boards */
#define MACH_MOMENCO_OCELOT 0
-#define MACH_MOMENCO_OCELOT_G 1
+#define MACH_MOMENCO_OCELOT_G 1 /* no longer supported (May 2007) */
#define MACH_MOMENCO_OCELOT_C 2
-#define MACH_MOMENCO_JAGUAR_ATX 3
+#define MACH_MOMENCO_JAGUAR_ATX 3 /* no longer supported (May 2007) */
#define MACH_MOMENCO_OCELOT_3 4
/*
@@ -254,7 +254,7 @@ extern void free_init_pages(const char *what,
extern char arcs_cmdline[CL_SIZE];
/*
- * Registers a0, a1, a3 and a4 as passed to the kenrel entry by firmware
+ * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware
*/
extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h
index b414a7d9db43..483685b1592e 100644
--- a/include/asm-mips/fpu.h
+++ b/include/asm-mips/fpu.h
@@ -16,6 +16,7 @@
#include <asm/mipsregs.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
+#include <asm/hazards.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/current.h>
@@ -38,34 +39,16 @@ extern void _init_fpu(void);
extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);
-#if defined(CONFIG_CPU_SB1)
-#define __enable_fpu_hazard() \
-do { \
- asm(".set push \n\t" \
- ".set mips64 \n\t" \
- ".set noreorder \n\t" \
- "ssnop \n\t" \
- "bnezl $0, .+4 \n\t" \
- "ssnop \n\t" \
- ".set pop"); \
-} while (0)
-#else
-#define __enable_fpu_hazard() \
-do { \
- asm("nop;nop;nop;nop"); /* max. hazard */ \
-} while (0)
-#endif
-
#define __enable_fpu() \
do { \
set_c0_status(ST0_CU1); \
- __enable_fpu_hazard(); \
+ enable_fpu_hazard(); \
} while (0)
#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
- /* We don't care about the c0 hazard here */ \
+ disable_fpu_hazard(); \
} while (0)
#define enable_fpu() \
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index e50c77e69cb5..d9119f43f9aa 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -178,4 +178,36 @@ ASMMACRO(back_to_back_c0_hazard,
#endif
+
+/* FPU hazards */
+
+#if defined(CONFIG_CPU_SB1)
+ASMMACRO(enable_fpu_hazard,
+ .set push;
+ .set mips64;
+ .set noreorder;
+ _ssnop;
+ bnezl $0,.+4;
+ _ssnop;
+ .set pop
+)
+ASMMACRO(disable_fpu_hazard,
+)
+
+#elif defined(CONFIG_CPU_MIPSR2)
+ASMMACRO(enable_fpu_hazard,
+ _ehb
+)
+ASMMACRO(disable_fpu_hazard,
+ _ehb
+)
+#else
+ASMMACRO(enable_fpu_hazard,
+ nop; nop; nop; nop
+)
+ASMMACRO(disable_fpu_hazard,
+ _ehb
+)
+#endif
+
#endif /* _ASM_HAZARDS_H */
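
A rough illustration of what the new FPU hazard macros amount to on the two simplest CPU classes above (the SB1 case keeps its longer branch-likely sequence); this is a sketch of the effect, not the actual ASMMACRO() expansion.

/*
 * Sketch only: roughly the cost of enable_fpu_hazard() per CPU class,
 * mirroring the hunk above.  The real header uses ASMMACRO() so the same
 * sequences are available to both C and assembler callers.
 */
static inline void enable_fpu_hazard_sketch(void)
{
#if defined(CONFIG_CPU_MIPSR2)
	__asm__ __volatile__("ehb");			/* execution hazard barrier */
#else
	__asm__ __volatile__("nop; nop; nop; nop");	/* worst-case CP0 hazard */
#endif
}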
diff --git a/include/asm-mips/highmem.h b/include/asm-mips/highmem.h
index f8c8182f7f2e..4d6bd5c31c7b 100644
--- a/include/asm-mips/highmem.h
+++ b/include/asm-mips/highmem.h
@@ -48,46 +48,6 @@ extern pte_t *pkmap_page_table;
extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
-/*
- * CONFIG_LIMITED_DMA is for systems with DMA limitations such as Momentum's
- * Jaguar ATX. This option exploits the highmem code in the kernel so is
- * always enabled together with CONFIG_HIGHMEM but at this time doesn't
- * actually add highmem functionality.
- */
-
-#ifdef CONFIG_LIMITED_DMA
-
-/*
- * These are the default functions for the no-highmem case from
- * <linux/highmem.h>
- */
-static inline void *kmap(struct page *page)
-{
- might_sleep();
- return page_address(page);
-}
-
-#define kunmap(page) do { (void) (page); } while (0)
-
-static inline void *kmap_atomic(struct page *page, enum km_type type)
-{
- pagefault_disable();
- return page_address(page);
-}
-
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
- pagefault_enable();
-}
-
-#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
-
-#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
-
-#define flush_cache_kmaps() do { } while (0)
-
-#else /* LIMITED_DMA */
-
extern void *__kmap(struct page *page);
extern void __kunmap(struct page *page);
extern void *__kmap_atomic(struct page *page, enum km_type type);
@@ -103,8 +63,6 @@ extern struct page *__kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() flush_cache_all()
-#endif /* LIMITED_DMA */
-
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
diff --git a/include/asm-mips/kdebug.h b/include/asm-mips/kdebug.h
new file mode 100644
index 000000000000..6ece1b037665
--- /dev/null
+++ b/include/asm-mips/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h
index b25267ebcb09..cdbab43b7d3a 100644
--- a/include/asm-mips/kexec.h
+++ b/include/asm-mips/kexec.h
@@ -21,8 +21,6 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_MIPS
-#define MAX_NOTE_BYTES 1024
-
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
diff --git a/include/asm-mips/local.h b/include/asm-mips/local.h
index 9e2d43bae388..ed882c88e0ca 100644
--- a/include/asm-mips/local.h
+++ b/include/asm-mips/local.h
@@ -1,60 +1,288 @@
-#ifndef _ASM_LOCAL_H
-#define _ASM_LOCAL_H
+#ifndef _ARCH_MIPS_LOCAL_H
+#define _ARCH_MIPS_LOCAL_H
#include <linux/percpu.h>
+#include <linux/bitops.h>
#include <asm/atomic.h>
+#include <asm/war.h>
-#ifdef CONFIG_32BIT
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
-typedef atomic_t local_t;
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-#define LOCAL_INIT(i) ATOMIC_INIT(i)
-#define local_read(v) atomic_read(v)
-#define local_set(v,i) atomic_set(v,i)
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set(&(l)->a, (i))
-#define local_inc(v) atomic_inc(v)
-#define local_dec(v) atomic_dec(v)
-#define local_add(i, v) atomic_add(i, v)
-#define local_sub(i, v) atomic_sub(i, v)
+#define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
-#endif
+/*
+ * Same as above, but return the result value
+ */
+static __inline__ long local_add_return(long i, local_t * l)
+{
+ unsigned long result;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_add_return \n"
+ " addu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqzl %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_add_return \n"
+ " addu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqz %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else {
+ unsigned long flags;
-#ifdef CONFIG_64BIT
+ local_irq_save(flags);
+ result = l->a.counter;
+ result += i;
+ l->a.counter = result;
+ local_irq_restore(flags);
+ }
-typedef atomic64_t local_t;
+ return result;
+}
-#define LOCAL_INIT(i) ATOMIC64_INIT(i)
-#define local_read(v) atomic64_read(v)
-#define local_set(v,i) atomic64_set(v,i)
+static __inline__ long local_sub_return(long i, local_t * l)
+{
+ unsigned long result;
-#define local_inc(v) atomic64_inc(v)
-#define local_dec(v) atomic64_dec(v)
-#define local_add(i, v) atomic64_add(i, v)
-#define local_sub(i, v) atomic64_sub(i, v)
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
-#endif
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ " subu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqzl %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
-#define __local_inc(v) ((v)->counter++)
-#define __local_dec(v) ((v)->counter--)
-#define __local_add(i,v) ((v)->counter+=(i))
-#define __local_sub(i,v) ((v)->counter-=(i))
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ " subu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = l->a.counter;
+ result -= i;
+ l->a.counter = result;
+ local_irq_restore(flags);
+ }
+
+ return result;
+}
/*
- * Use these for per-cpu local_t variables: on some archs they are
+ * local_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically test @l and subtract @i if @l is greater than or equal to @i.
+ * The function returns the old value of @l minus @i.
+ */
+static __inline__ long local_sub_if_positive(long i, local_t * l)
+{
+ unsigned long result;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+ __SC "%0, %2 \n"
+ " .set noreorder \n"
+ " beqzl %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+ __SC "%0, %2 \n"
+ " .set noreorder \n"
+ " beqz %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = l->a.counter;
+ result -= i;
+ if (result >= 0)
+ l->a.counter = result;
+ local_irq_restore(flags);
+ }
+
+ return result;
+}
+
+#define local_cmpxchg(l, o, n) \
+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (xchg_local(&((l)->a.counter),(n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_dec_return(l) local_sub_return(1,(l))
+#define local_inc_return(l) local_add_return(1,(l))
+
+/*
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+/*
+ * local_dec_and_test - decrement by 1 and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
+
+/*
+ * local_dec_if_positive - decrement by 1 if old value positive
+ * @l: pointer of type local_t
+ */
+#define local_dec_if_positive(l) local_sub_if_positive(1, l)
+
+/*
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define local_add_negative(i,l) (local_add_return(i, (l)) < 0)
+
+/* Use these for per-cpu local_t variables: on some archs they are
* much more efficient than these naive implementations. Note they take
* a variable, not an address.
*/
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l) ((l)->a.counter--)
+#define __local_add(i,l) ((l)->a.counter+=(i))
+#define __local_sub(i,l) ((l)->a.counter-=(i))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+ still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(l) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (l); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(l) \
+ ({ preempt_disable(); \
+ l; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
+#define __cpu_local_inc(l) cpu_local_inc(l)
+#define __cpu_local_dec(l) cpu_local_dec(l)
+#define __cpu_local_add(i, l) cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-#endif /* _ASM_LOCAL_H */
+#endif /* _ARCH_MIPS_LOCAL_H */
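
To show how the reworked local_t is meant to be used, here is a short per-CPU counter sketch; the counter name is invented and the pattern assumes the usual per-CPU accessors of this kernel version.

/*
 * Usage sketch only.  "pkt_count" is a hypothetical per-CPU statistics
 * counter; cpu_local_inc() wraps the update in preempt_disable()/enable()
 * so the increment always lands on the current CPU's copy.
 */
#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, pkt_count) = LOCAL_INIT(0);

static void count_packet(void)
{
	cpu_local_inc(pkt_count);
}

static unsigned long total_packets(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(pkt_count, cpu));

	return sum;
}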
diff --git a/include/asm-mips/mach-au1x00/au1550_spi.h b/include/asm-mips/mach-au1x00/au1550_spi.h
new file mode 100644
index 000000000000..c2f0466523ec
--- /dev/null
+++ b/include/asm-mips/mach-au1x00/au1550_spi.h
@@ -0,0 +1,16 @@
+/*
+ * au1550_spi.h - au1550 psc spi controller driver - platform data struct
+ */
+
+#ifndef _AU1550_SPI_H_
+#define _AU1550_SPI_H_
+
+struct au1550_spi_info {
+ s16 bus_num; /* defines which PSC and IRQ to use */
+ u32 mainclk_hz; /* main input clock frequency of PSC */
+ u16 num_chipselect; /* number of chipselects supported */
+ void (*activate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
+ void (*deactivate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
+};
+
+#endif
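
As an illustration of the new platform data (not part of the patch), a board file might fill it in roughly like this; the clock rate, chip-select count and callback body are assumptions.

/* Illustration only -- every value below is a placeholder. */
#include <asm/mach-au1x00/au1550_spi.h>

static void board_spi_cs(struct au1550_spi_info *spi, int cs, int polarity)
{
	/* drive the board-specific chip-select line for channel "cs" */
}

static struct au1550_spi_info board_spi_pdata = {
	.bus_num	= 0,		/* PSC0 */
	.mainclk_hz	= 48000000,	/* PSC input clock, board dependent */
	.num_chipselect	= 2,
	.activate_cs	= board_spi_cs,
	.deactivate_cs	= board_spi_cs,
};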
diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index 24a8d51a55a3..684a501c04cf 100644
--- a/include/asm-mips/mach-cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
@@ -69,6 +69,8 @@
#define COBALT_BRD_ID_QUBE2 0x5
#define COBALT_BRD_ID_RAQ2 0x6
+extern int cobalt_board_id;
+
#define PCI_CFG_SET(devfn,where) \
GT_WRITE(GT_PCI0_CFGADDR_OFS, (0x80000000 | (PCI_SLOT (devfn) << 11) | \
(PCI_FUNC (devfn) << 8) | (where)))
diff --git a/include/asm-mips/mach-ja/cpu-feature-overrides.h b/include/asm-mips/mach-ja/cpu-feature-overrides.h
deleted file mode 100644
index 84b6dead0e8a..000000000000
--- a/include/asm-mips/mach-ja/cpu-feature-overrides.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003, 2004 Ralf Baechle
- */
-#ifndef __ASM_MACH_JA_CPU_FEATURE_OVERRIDES_H
-#define __ASM_MACH_JA_CPU_FEATURE_OVERRIDES_H
-
-/*
- * Momentum Jaguar ATX always has the RM9000 processor.
- */
-#define cpu_has_watch 1
-#define cpu_has_mips16 0
-#define cpu_has_divec 0
-#define cpu_has_vce 0
-#define cpu_has_cache_cdex_p 0
-#define cpu_has_cache_cdex_s 0
-#define cpu_has_prefetch 1
-#define cpu_has_mcheck 0
-#define cpu_has_ejtag 0
-
-#define cpu_has_llsc 1
-#define cpu_has_vtag_icache 0
-#define cpu_has_dc_aliases 0
-#define cpu_has_ic_fills_f_dc 0
-#define cpu_has_dsp 0
-#define cpu_icache_snoops_remote_store 0
-
-#define cpu_has_nofpuex 0
-#define cpu_has_64bits 1
-
-#define cpu_has_inclusive_pcaches 0
-
-#define cpu_dcache_line_size() 32
-#define cpu_icache_line_size() 32
-#define cpu_scache_line_size() 32
-
-#define cpu_has_mips32r1 0
-#define cpu_has_mips32r2 0
-#define cpu_has_mips64r1 0
-#define cpu_has_mips64r2 0
-
-#endif /* __ASM_MACH_JA_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/mach-ja/spaces.h b/include/asm-mips/mach-ja/spaces.h
deleted file mode 100644
index 8466a0e69c79..000000000000
--- a/include/asm-mips/mach-ja/spaces.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
- * Copyright (C) 2000, 2002 Maciej W. Rozycki
- * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
- */
-#ifndef __ASM_MACH_JA_SPACES_H
-#define __ASM_MACH_JA_SPACES_H
-
-/*
- * Memory above this physical address will be considered highmem.
- */
-#define HIGHMEM_START 0x08000000UL
-
-#include_next <spaces.h>
-
-#endif /* __ASM_MACH_JA_SPACES_H */
diff --git a/include/asm-mips/mips-boards/malta.h b/include/asm-mips/mips-boards/malta.h
index b0ba3c5a921e..eec91001bb65 100644
--- a/include/asm-mips/mips-boards/malta.h
+++ b/include/asm-mips/mips-boards/malta.h
@@ -25,6 +25,10 @@
#include <asm/mips-boards/msc01_pci.h>
#include <asm/gt64120.h>
+/* Mips interrupt controller found in SOCit variations */
+#define MIPS_MSC01_IC_REG_BASE 0x1bc40000
+#define MIPS_SOCITSC_IC_REG_BASE 0x1ffa0000
+
/*
* Malta I/O ports base address for the Galileo GT64120 and Algorithmics
* Bonito system controllers.
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h
index fe065d6070ca..65024ffd7879 100644
--- a/include/asm-mips/mmu_context.h
+++ b/include/asm-mips/mmu_context.h
@@ -20,6 +20,7 @@
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
+#include <asm-generic/mm_hooks.h>
/*
* For the fast tlb miss handlers, we keep a per cpu array of pointers
diff --git a/include/asm-mips/msc01_ic.h b/include/asm-mips/msc01_ic.h
index aa7ad9a71762..7989b9ffc1d2 100644
--- a/include/asm-mips/msc01_ic.h
+++ b/include/asm-mips/msc01_ic.h
@@ -94,10 +94,7 @@
/*
* MIPS System controller interrupt register base.
*
- * FIXME - are these macros specific to Malta and co or to the MSC? If the
- * latter, they should be moved elsewhere.
*/
-#define MIPS_MSC01_IC_REG_BASE 0x1bc40000
/*****************************************************************************
* Absolute register addresses
@@ -144,7 +141,7 @@ typedef struct msc_irqmap {
#define MSC01_IRQ_LEVEL 0
#define MSC01_IRQ_EDGE 1
-extern void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq);
+extern void __init init_msc_irqs(unsigned long icubase, unsigned int base, msc_irqmap_t *imp, int nirq);
extern void ll_msc_irq(void);
#endif /* __ASM_MIPS_BOARDS_MSC01_IC_H */
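
With the extra icubase parameter, a caller now states explicitly which register base the interrupt controller lives at; a sketch of the new calling convention follows (the irq map array, its size and the interrupt number base are placeholders).

/*
 * Sketch of the new calling convention only.  msc_irqmap[], msc_nr_irqs
 * and BOARD_MSC01_INT_BASE stand in for the board's real interrupt map;
 * MIPS_MSC01_IC_REG_BASE comes from the malta.h hunk earlier in this diff.
 */
#include <linux/init.h>
#include <asm/msc01_ic.h>
#include <asm/mips-boards/malta.h>

#define BOARD_MSC01_INT_BASE	64	/* placeholder interrupt number base */

extern msc_irqmap_t msc_irqmap[];
extern int msc_nr_irqs;

static void __init board_init_irq(void)
{
	init_msc_irqs(MIPS_MSC01_IC_REG_BASE, BOARD_MSC01_INT_BASE,
		      msc_irqmap, msc_nr_irqs);
}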
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index d3fbd83ff545..5c3239dad0f2 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -190,10 +190,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
-#ifdef CONFIG_LIMITED_DMA
-#define WANT_PAGE_VIRTUAL
-#endif
-
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
diff --git a/include/asm-mips/pgalloc.h b/include/asm-mips/pgalloc.h
index 5685d4fc7881..9fb57c035213 100644
--- a/include/asm-mips/pgalloc.h
+++ b/include/asm-mips/pgalloc.h
@@ -11,6 +11,7 @@
#include <linux/highmem.h>
#include <linux/mm.h>
+#include <linux/sched.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 0d3295f57a95..27d77d981937 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -387,10 +387,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#include <asm-generic/pgtable.h>
/*
diff --git a/include/asm-mips/poll.h b/include/asm-mips/poll.h
index 70881f8c5c50..47b952080431 100644
--- a/include/asm-mips/poll.h
+++ b/include/asm-mips/poll.h
@@ -1,28 +1,9 @@
#ifndef __ASM_POLL_H
#define __ASM_POLL_H
-#define POLLIN 0x0001
-#define POLLPRI 0x0002
-#define POLLOUT 0x0004
-
-#define POLLERR 0x0008
-#define POLLHUP 0x0010
-#define POLLNVAL 0x0020
-
-#define POLLRDNORM 0x0040
-#define POLLRDBAND 0x0080
#define POLLWRNORM POLLOUT
#define POLLWRBAND 0x0100
-/* These seem to be more or less nonstandard ... */
-#define POLLMSG 0x0400
-#define POLLREMOVE 0x1000
-#define POLLRDHUP 0x2000
-
-struct pollfd {
- int fd;
- short events;
- short revents;
-};
+#include <asm-generic/poll.h>
#endif /* __ASM_POLL_H */
diff --git a/include/asm-mips/scatterlist.h b/include/asm-mips/scatterlist.h
index 22634706e9d5..7af104c95b20 100644
--- a/include/asm-mips/scatterlist.h
+++ b/include/asm-mips/scatterlist.h
@@ -1,6 +1,8 @@
#ifndef __ASM_SCATTERLIST_H
#define __ASM_SCATTERLIST_H
+#include <asm/types.h>
+
struct scatterlist {
struct page * page;
unsigned int offset;
diff --git a/include/asm-mips/serial.h b/include/asm-mips/serial.h
index d7a65135d837..ce51213d84f9 100644
--- a/include/asm-mips/serial.h
+++ b/include/asm-mips/serial.h
@@ -81,25 +81,6 @@
#define STD_SERIAL_PORT_DEFNS
#endif /* CONFIG_HAVE_STD_PC_SERIAL_PORTS */
-#ifdef CONFIG_MOMENCO_JAGUAR_ATX
-/* Ordinary NS16552 duart with a 20MHz crystal. */
-#define JAGUAR_ATX_UART_CLK 20000000
-#define JAGUAR_ATX_BASE_BAUD (JAGUAR_ATX_UART_CLK / 16)
-
-#define JAGUAR_ATX_SERIAL1_IRQ 6
-#define JAGUAR_ATX_SERIAL1_BASE 0xfd000023L
-
-#define _JAGUAR_ATX_SERIAL_INIT(int, base) \
- { .baud_base = JAGUAR_ATX_BASE_BAUD, irq: int, \
- .flags = (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST), \
- .iomem_base = (u8 *) base, iomem_reg_shift: 2, \
- io_type: SERIAL_IO_MEM }
-#define MOMENCO_JAGUAR_ATX_SERIAL_PORT_DEFNS \
- _JAGUAR_ATX_SERIAL_INIT(JAGUAR_ATX_SERIAL1_IRQ, JAGUAR_ATX_SERIAL1_BASE)
-#else
-#define MOMENCO_JAGUAR_ATX_SERIAL_PORT_DEFNS
-#endif
-
#ifdef CONFIG_MOMENCO_OCELOT_3
#define OCELOT_3_BASE_BAUD ( 20000000 / 16 )
#define OCELOT_3_SERIAL_IRQ 6
@@ -134,27 +115,6 @@
#define MOMENCO_OCELOT_SERIAL_PORT_DEFNS
#endif
-#ifdef CONFIG_MOMENCO_OCELOT_G
-/* Ordinary NS16552 duart with a 20MHz crystal. */
-#define OCELOT_G_BASE_BAUD ( 20000000 / 16 )
-
-#define OCELOT_G_SERIAL1_IRQ 4
-#if 0
-#define OCELOT_G_SERIAL1_BASE 0xe0001020
-#else
-#define OCELOT_G_SERIAL1_BASE 0xfd000020
-#endif
-
-#define _OCELOT_G_SERIAL_INIT(int, base) \
- { .baud_base = OCELOT_G_BASE_BAUD, .irq = int, .flags = STD_COM_FLAGS,\
- .iomem_base = (u8 *) base, .iomem_reg_shift = 2, \
- .io_type = SERIAL_IO_MEM }
-#define MOMENCO_OCELOT_G_SERIAL_PORT_DEFNS \
- _OCELOT_G_SERIAL_INIT(OCELOT_G_SERIAL1_IRQ, OCELOT_G_SERIAL1_BASE)
-#else
-#define MOMENCO_OCELOT_G_SERIAL_PORT_DEFNS
-#endif
-
#ifdef CONFIG_MOMENCO_OCELOT_C
/* Ordinary NS16552 duart with a 20MHz crystal. */
#define OCELOT_C_BASE_BAUD ( 20000000 / 16 )
@@ -210,7 +170,6 @@
IP32_SERIAL_PORT_DEFNS \
JAZZ_SERIAL_PORT_DEFNS \
STD_SERIAL_PORT_DEFNS \
- MOMENCO_OCELOT_G_SERIAL_PORT_DEFNS \
MOMENCO_OCELOT_C_SERIAL_PORT_DEFNS \
MOMENCO_OCELOT_SERIAL_PORT_DEFNS \
MOMENCO_OCELOT_3_SERIAL_PORT_DEFNS
diff --git a/include/asm-mips/sgi/seeq.h b/include/asm-mips/sgi/seeq.h
new file mode 100644
index 000000000000..af0ffd76899d
--- /dev/null
+++ b/include/asm-mips/sgi/seeq.h
@@ -0,0 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#ifndef __ASM_SGI_SEEQ_H
+#define __ASM_SGI_SEEQ_H
+
+#include <linux/if_ether.h>
+
+#include <asm/sgi/hpc3.h>
+
+struct sgiseeq_platform_data {
+ struct hpc3_regs *hpc;
+ unsigned int irq;
+ unsigned char mac[ETH_ALEN];
+};
+
+#endif /* __ASM_SGI_SEEQ_H */
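
For illustration (not in the patch), SGI platform setup code would hand this to the Seeq ethernet driver roughly as follows; the IRQ, MAC address and device name are placeholders.

/*
 * Illustration only -- the IRQ number, MAC address and "sgiseeq" device
 * name are assumptions, not values lifted from real IP22 platform code.
 * The .hpc pointer is filled in at runtime (e.g. eth0_pd.hpc = hpc3c0,
 * the primary HPC3 instance from <asm/sgi/hpc3.h>) before registration
 * with platform_device_register(&eth0_device).
 */
#include <linux/platform_device.h>
#include <asm/sgi/seeq.h>

static struct sgiseeq_platform_data eth0_pd = {
	.irq	= 3,					/* placeholder */
	.mac	= { 0x08, 0x00, 0x69, 0x00, 0x00, 0x01 },	/* placeholder */
};

static struct platform_device eth0_device = {
	.name			= "sgiseeq",		/* assumed driver name */
	.id			= 0,
	.dev.platform_data	= &eth0_pd,
};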
diff --git a/include/asm-mips/sgi/wd.h b/include/asm-mips/sgi/wd.h
new file mode 100644
index 000000000000..0d6c3a4da891
--- /dev/null
+++ b/include/asm-mips/sgi/wd.h
@@ -0,0 +1,20 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#ifndef __ASM_SGI_WD_H
+#define __ASM_SGI_WD_H
+
+#include <asm/sgi/hpc3.h>
+
+struct sgiwd93_platform_data {
+ unsigned int unit;
+ unsigned int irq;
+ struct hpc3_scsiregs *hregs;
+ unsigned char *wdregs;
+};
+
+#endif /* __ASM_SGI_WD_H */
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 290887077e44..bb0b289dbc9e 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -55,7 +55,7 @@ do { \
if (cpu_has_dsp) \
__save_dsp(prev); \
next->thread.emulated_fp = 0; \
- (last) = resume(prev, next, next->thread_info); \
+ (last) = resume(prev, next, task_thread_info(next)); \
if (cpu_has_dsp) \
__restore_dsp(current); \
} while(0)
@@ -201,7 +201,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
#define __HAVE_ARCH_CMPXCHG 1
@@ -262,6 +261,58 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
return retval;
}
+static inline unsigned long __cmpxchg_u32_local(volatile int * m,
+ unsigned long old, unsigned long new)
+{
+ __u32 retval;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 # __cmpxchg_u32 \n"
+ " bne %0, %z3, 2f \n"
+ " .set mips0 \n"
+ " move $1, %z4 \n"
+ " .set mips3 \n"
+ " sc $1, %1 \n"
+ " beqzl $1, 1b \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 # __cmpxchg_u32 \n"
+ " bne %0, %z3, 2f \n"
+ " .set mips0 \n"
+ " move $1, %z4 \n"
+ " .set mips3 \n"
+ " sc $1, %1 \n"
+ " beqz $1, 1b \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ if (retval == old)
+ *m = new;
+ local_irq_restore(flags); /* implies memory barrier */
+ }
+
+ return retval;
+}
+
#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
unsigned long new)
@@ -315,10 +366,62 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
return retval;
}
+
+static inline unsigned long __cmpxchg_u64_local(volatile int * m,
+ unsigned long old, unsigned long new)
+{
+ __u64 retval;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: lld %0, %2 # __cmpxchg_u64 \n"
+ " bne %0, %z3, 2f \n"
+ " move $1, %z4 \n"
+ " scd $1, %1 \n"
+ " beqzl $1, 1b \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: lld %0, %2 # __cmpxchg_u64 \n"
+ " bne %0, %z3, 2f \n"
+ " move $1, %z4 \n"
+ " scd $1, %1 \n"
+ " beqz $1, 1b \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ if (retval == old)
+ *m = new;
+ local_irq_restore(flags); /* implies memory barrier */
+ }
+
+ return retval;
+}
+
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
+extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
+ volatile int * m, unsigned long old, unsigned long new);
+#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
#endif
/* This function doesn't exist, so you'll get a linker error
@@ -338,11 +441,33 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
+static inline unsigned long __cmpxchg_local(volatile void * ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,old,new) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
+
+#define cmpxchg_local(ptr,old,new) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
+ (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
-extern void *set_vi_handler (int n, void *addr);
+
+typedef void (*vi_handler_t)(void);
+extern void *set_vi_handler (int n, vi_handler_t addr);
+
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);
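
Lastly, a small sketch of the typed set_vi_handler() interface introduced above; the handler body and vector number are placeholders.

/*
 * Sketch only: with the vi_handler_t prototype, a vectored-interrupt
 * handler is a plain void function registered by vector number.  The
 * handler name and vector 7 below are placeholders.
 */
#include <linux/init.h>
#include <asm/system.h>

static void board_perf_irq(void)
{
	/* acknowledge and service the interrupt source here */
}

static void __init board_setup_vi(void)
{
	set_vi_handler(7, board_perf_irq);
}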