Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 62
-rw-r--r--  arch/mips/include/asm/atomic.h | 9
-rw-r--r--  arch/mips/include/asm/barrier.h | 3
-rw-r--r--  arch/mips/include/asm/bitops.h | 11
-rw-r--r--  arch/mips/include/asm/branch.h | 30
-rw-r--r--  arch/mips/include/asm/cacheflush.h | 6
-rw-r--r--  arch/mips/include/asm/cmp.h | 1
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 20
-rw-r--r--  arch/mips/include/asm/cpu-info.h | 13
-rw-r--r--  arch/mips/include/asm/cpu-type.h | 4
-rw-r--r--  arch/mips/include/asm/cpu.h | 3
-rw-r--r--  arch/mips/include/asm/dec/kn05.h | 15
-rw-r--r--  arch/mips/include/asm/fixmap.h | 4
-rw-r--r--  arch/mips/include/asm/fpu.h | 7
-rw-r--r--  arch/mips/include/asm/fpu_emulator.h | 21
-rw-r--r--  arch/mips/include/asm/gic.h | 1
-rw-r--r--  arch/mips/include/asm/gio_device.h | 4
-rw-r--r--  arch/mips/include/asm/idle.h | 14
-rw-r--r--  arch/mips/include/asm/irq.h | 96
-rw-r--r--  arch/mips/include/asm/irqflags.h | 32
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 183
-rw-r--r--  arch/mips/include/asm/kvm_para.h | 109
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h | 1
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/irq.h | 2
-rw-r--r--  arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h | 4
-rw-r--r--  arch/mips/include/asm/mach-jz4740/dma.h | 2
-rw-r--r--  arch/mips/include/asm/mach-malta/kernel-entry-init.h | 30
-rw-r--r--  arch/mips/include/asm/mach-malta/malta-pm.h | 37
-rw-r--r--  arch/mips/include/asm/mach-netlogic/topology.h | 2
-rw-r--r--  arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h | 36
-rw-r--r--  arch/mips/include/asm/mach-paravirt/irq.h | 19
-rw-r--r--  arch/mips/include/asm/mach-paravirt/kernel-entry-init.h | 50
-rw-r--r--  arch/mips/include/asm/mach-paravirt/war.h | 25
-rw-r--r--  arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h | 4
-rw-r--r--  arch/mips/include/asm/mach-ralink/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-sead3/kernel-entry-init.h | 31
-rw-r--r--  arch/mips/include/asm/mips-boards/generic.h | 4
-rw-r--r--  arch/mips/include/asm/mips-boards/piix4.h | 12
-rw-r--r--  arch/mips/include/asm/mips-cpc.h | 34
-rw-r--r--  arch/mips/include/asm/mips_mt.h | 5
-rw-r--r--  arch/mips/include/asm/mipsmtregs.h | 2
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 151
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 122
-rw-r--r--  arch/mips/include/asm/module.h | 8
-rw-r--r--  arch/mips/include/asm/msa.h | 15
-rw-r--r--  arch/mips/include/asm/netlogic/mips-extns.h | 5
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/iomap.h | 18
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/pcibus.h | 14
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/pic.h | 4
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/sys.h | 35
-rw-r--r--  arch/mips/include/asm/netlogic/xlp-hal/xlp.h | 19
-rw-r--r--  arch/mips/include/asm/nile4.h | 2
-rw-r--r--  arch/mips/include/asm/octeon/octeon.h | 1
-rw-r--r--  arch/mips/include/asm/pci.h | 5
-rw-r--r--  arch/mips/include/asm/pgtable.h | 2
-rw-r--r--  arch/mips/include/asm/pm-cps.h | 51
-rw-r--r--  arch/mips/include/asm/pm.h | 159
-rw-r--r--  arch/mips/include/asm/prom.h | 6
-rw-r--r--  arch/mips/include/asm/ptrace.h | 3
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 7
-rw-r--r--  arch/mips/include/asm/sgi/ip22.h | 2
-rw-r--r--  arch/mips/include/asm/sigcontext.h | 2
-rw-r--r--  arch/mips/include/asm/smp-cps.h | 19
-rw-r--r--  arch/mips/include/asm/smp-ops.h | 1
-rw-r--r--  arch/mips/include/asm/smp.h | 3
-rw-r--r--  arch/mips/include/asm/smtc.h | 78
-rw-r--r--  arch/mips/include/asm/smtc_ipi.h | 129
-rw-r--r--  arch/mips/include/asm/smtc_proc.h | 23
-rw-r--r--  arch/mips/include/asm/stackframe.h | 196
-rw-r--r--  arch/mips/include/asm/thread_info.h | 11
-rw-r--r--  arch/mips/include/asm/time.h | 5
-rw-r--r--  arch/mips/include/asm/timex.h | 65
-rw-r--r--  arch/mips/include/asm/uasm.h | 27
-rw-r--r--  arch/mips/include/asm/unistd.h | 1
-rw-r--r--  arch/mips/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/mips/include/uapi/asm/bitfield.h | 29
-rw-r--r--  arch/mips/include/uapi/asm/inst.h | 65
-rw-r--r--  arch/mips/include/uapi/asm/kvm.h | 35
-rw-r--r--  arch/mips/include/uapi/asm/kvm_para.h | 6
-rw-r--r--  arch/mips/include/uapi/asm/sigcontext.h | 8
-rw-r--r--  arch/mips/include/uapi/asm/types.h | 5
81 files changed, 1188 insertions, 1099 deletions
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index b464b8b1147a..935543f14538 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -17,26 +17,8 @@
#ifdef CONFIG_64BIT
#include <asm/asmmacro-64.h>
#endif
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif
-
-#ifdef CONFIG_MIPS_MT_SMTC
- .macro local_irq_enable reg=t0
- mfc0 \reg, CP0_TCSTATUS
- ori \reg, \reg, TCSTATUS_IXMT
- xori \reg, \reg, TCSTATUS_IXMT
- mtc0 \reg, CP0_TCSTATUS
- _ehb
- .endm
- .macro local_irq_disable reg=t0
- mfc0 \reg, CP0_TCSTATUS
- ori \reg, \reg, TCSTATUS_IXMT
- mtc0 \reg, CP0_TCSTATUS
- _ehb
- .endm
-#elif defined(CONFIG_CPU_MIPSR2)
+#ifdef CONFIG_CPU_MIPSR2
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
@@ -71,7 +53,7 @@
sw \reg, TI_PRE_COUNT($28)
#endif
.endm
-#endif /* CONFIG_MIPS_MT_SMTC */
+#endif /* CONFIG_CPU_MIPSR2 */
.macro fpu_save_16even thread tmp=t0
cfc1 \tmp, fcr31
@@ -267,13 +249,35 @@
.set pop
.endm
#else
+
+#ifdef CONFIG_CPU_MICROMIPS
+#define CFC_MSA_INSN 0x587e0056
+#define CTC_MSA_INSN 0x583e0816
+#define LDD_MSA_INSN 0x58000837
+#define STD_MSA_INSN 0x5800083f
+#define COPY_UW_MSA_INSN 0x58f00056
+#define COPY_UD_MSA_INSN 0x58f80056
+#define INSERT_W_MSA_INSN 0x59300816
+#define INSERT_D_MSA_INSN 0x59380816
+#else
+#define CFC_MSA_INSN 0x787e0059
+#define CTC_MSA_INSN 0x783e0819
+#define LDD_MSA_INSN 0x78000823
+#define STD_MSA_INSN 0x78000827
+#define COPY_UW_MSA_INSN 0x78f00059
+#define COPY_UD_MSA_INSN 0x78f80059
+#define INSERT_W_MSA_INSN 0x79300819
+#define INSERT_D_MSA_INSN 0x79380819
+#endif
+
/*
* Temporary until all toolchains in use include MSA support.
*/
.macro cfcmsa rd, cs
.set push
.set noat
- .word 0x787e0059 | (\cs << 11)
+ .insn
+ .word CFC_MSA_INSN | (\cs << 11)
move \rd, $1
.set pop
.endm
@@ -282,7 +286,7 @@
.set push
.set noat
move $1, \rs
- .word 0x783e0819 | (\cd << 6)
+ .word CTC_MSA_INSN | (\cd << 6)
.set pop
.endm
@@ -290,7 +294,7 @@
.set push
.set noat
add $1, \base, \off
- .word 0x78000823 | (\wd << 6)
+ .word LDD_MSA_INSN | (\wd << 6)
.set pop
.endm
@@ -298,14 +302,15 @@
.set push
.set noat
add $1, \base, \off
- .word 0x78000827 | (\wd << 6)
+ .word STD_MSA_INSN | (\wd << 6)
.set pop
.endm
.macro copy_u_w rd, ws, n
.set push
.set noat
- .word 0x78f00059 | (\n << 16) | (\ws << 11)
+ .insn
+ .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
or \rd, $1, zero
.set pop
@@ -314,7 +319,8 @@
.macro copy_u_d rd, ws, n
.set push
.set noat
- .word 0x78f80059 | (\n << 16) | (\ws << 11)
+ .insn
+ .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
or \rd, $1, zero
.set pop
@@ -325,7 +331,7 @@
.set noat
/* move triggers an assembler bug... */
or $1, \rs, zero
- .word 0x79300819 | (\n << 16) | (\wd << 6)
+ .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
@@ -334,7 +340,7 @@
.set noat
/* move triggers an assembler bug... */
or $1, \rs, zero
- .word 0x79380819 | (\n << 16) | (\wd << 6)
+ .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
#endif
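
The asmmacro.h hunk above replaces hard-coded .word values with named *_MSA_INSN constants so the classic and microMIPS encodings can be selected at build time. A minimal C sketch of the same idea (illustrative only; the constants in the header are for assembly use, so the literal value is repeated here):

/* 0x787e0059 is the non-microMIPS CFC_MSA_INSN opcode used above */
static inline unsigned int example_cfcmsa_opcode(unsigned int cs)
{
	/* mirrors ".word CFC_MSA_INSN | (\cs << 11)" in the cfcmsa macro */
	return 0x787e0059 | (cs << 11);
}
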
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index e8eb3d53a241..37b2befe651a 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -761,13 +761,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#endif /* CONFIG_64BIT */
-/*
- * atomic*_return operations are serializing but not the non-*_return
- * versions.
- */
-#define smp_mb__before_atomic_dec() smp_mb__before_llsc()
-#define smp_mb__after_atomic_dec() smp_llsc_mb()
-#define smp_mb__before_atomic_inc() smp_mb__before_llsc()
-#define smp_mb__after_atomic_inc() smp_llsc_mb()
-
#endif /* _ASM_ATOMIC_H */
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index e1aa4e4c2984..d0101dd0575e 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -195,4 +195,7 @@ do { \
___p1; \
})
+#define smp_mb__before_atomic() smp_mb__before_llsc()
+#define smp_mb__after_atomic() smp_llsc_mb()
+
#endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 6a65d49e2c0d..7c8816f7b7c4 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -38,13 +38,6 @@
#endif
/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() smp_mb__before_llsc()
-#define smp_mb__after_clear_bit() smp_llsc_mb()
-
-
-/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
*/
@@ -120,7 +113,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
@@ -175,7 +168,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
*/
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(nr, addr);
}
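
The updated clear_bit() comment points callers at the renamed generic barriers. A minimal sketch (not from the patch; the flag word and helper are hypothetical) of the locking pattern it describes:

#include <linux/bitops.h>

static unsigned long example_flags;		/* hypothetical flag word */

static void example_release_busy(void)
{
	/* order earlier stores before the atomic clear ... */
	smp_mb__before_atomic();
	clear_bit(0, &example_flags);
	/* ... and make the clear visible before later accesses */
	smp_mb__after_atomic();
}
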
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index e28a3e0eb3cb..de781cf54bc7 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -8,6 +8,8 @@
#ifndef _ASM_BRANCH_H
#define _ASM_BRANCH_H
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <asm/inst.h>
@@ -18,12 +20,40 @@ extern int __compute_return_epc_for_insn(struct pt_regs *regs,
extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
+/*
+ * microMIPS bitfields
+ */
+#define MM_POOL32A_MINOR_MASK 0x3f
+#define MM_POOL32A_MINOR_SHIFT 0x6
+#define MM_MIPS32_COND_FC 0x30
+
+extern int __mm_isBranchInstr(struct pt_regs *regs,
+ struct mm_decoded_insn dec_insn, unsigned long *contpc);
+
+static inline int mm_isBranchInstr(struct pt_regs *regs,
+ struct mm_decoded_insn dec_insn, unsigned long *contpc)
+{
+ if (!cpu_has_mmips)
+ return 0;
+
+ return __mm_isBranchInstr(regs, dec_insn, contpc);
+}
static inline int delay_slot(struct pt_regs *regs)
{
return regs->cp0_cause & CAUSEF_BD;
}
+static inline void clear_delay_slot(struct pt_regs *regs)
+{
+ regs->cp0_cause &= ~CAUSEF_BD;
+}
+
+static inline void set_delay_slot(struct pt_regs *regs)
+{
+ regs->cp0_cause |= CAUSEF_BD;
+}
+
static inline unsigned long exception_epc(struct pt_regs *regs)
{
if (likely(!delay_slot(regs)))
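
The new set_delay_slot()/clear_delay_slot() helpers wrap the Cause.BD updates that branch emulation otherwise open-codes, and mm_isBranchInstr() compiles to 0 when the kernel has no microMIPS support. A hedged sketch (function name hypothetical) of how an emulator might combine them:

#include <asm/branch.h>

static int example_fixup_epc(struct pt_regs *regs,
			     struct mm_decoded_insn dec_insn,
			     unsigned long *contpc)
{
	if (!mm_isBranchInstr(regs, dec_insn, contpc)) {
		/* not a branch: the next instruction is not in a delay slot */
		clear_delay_slot(regs);
		return 0;
	}

	/* branch found: the following instruction executes in its delay slot */
	set_delay_slot(regs);
	return 1;
}
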
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 69468ded2828..e08381a37f8b 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -113,6 +113,12 @@ unsigned long run_uncached(void *func);
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
+extern void *kmap_noncoherent(struct page *page, unsigned long addr);
+
+static inline void kunmap_noncoherent(void)
+{
+ kunmap_coherent();
+}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
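
kmap_noncoherent() mirrors the existing kmap_coherent() interface; the sketch below is an assumed usage (not from the patch) copying data out of a page through such a temporary mapping:

#include <linux/string.h>
#include <asm/cacheflush.h>

static void example_copy_from_page(struct page *page, unsigned long vaddr,
				   void *dst, unsigned long len)
{
	void *src = kmap_noncoherent(page, vaddr);

	memcpy(dst, src, len);
	kunmap_noncoherent();
}
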
diff --git a/arch/mips/include/asm/cmp.h b/arch/mips/include/asm/cmp.h
index 89a73fb93ae6..033d97303c85 100644
--- a/arch/mips/include/asm/cmp.h
+++ b/arch/mips/include/asm/cmp.h
@@ -10,7 +10,6 @@ extern void cmp_smp_setup(void);
extern void cmp_smp_finish(void);
extern void cmp_boot_secondary(int cpu, struct task_struct *t);
extern void cmp_init_secondary(void);
-extern void cmp_cpus_done(void);
extern void cmp_prepare_cpus(unsigned int max_cpus);
/* This is platform specific */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index f56cc975b92f..c7d8c997d93e 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -110,9 +110,15 @@
#ifndef cpu_has_smartmips
#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
#endif
+
#ifndef cpu_has_rixi
-#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
+# ifdef CONFIG_64BIT
+# define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
+# else /* CONFIG_32BIT */
+# define cpu_has_rixi ((cpu_data[0].options & MIPS_CPU_RIXI) && !cpu_has_64bits)
+# endif
#endif
+
#ifndef cpu_has_mmips
# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
@@ -120,6 +126,7 @@
# define cpu_has_mmips 0
# endif
#endif
+
#ifndef cpu_has_vtag_icache
#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
#endif
@@ -183,6 +190,17 @@
/*
* Shortcuts ...
*/
+#define cpu_has_mips_2_3_4_5 (cpu_has_mips_2 | cpu_has_mips_3_4_5)
+#define cpu_has_mips_3_4_5 (cpu_has_mips_3 | cpu_has_mips_4_5)
+#define cpu_has_mips_4_5 (cpu_has_mips_4 | cpu_has_mips_5)
+
+#define cpu_has_mips_2_3_4_5_r (cpu_has_mips_2 | cpu_has_mips_3_4_5_r)
+#define cpu_has_mips_3_4_5_r (cpu_has_mips_3 | cpu_has_mips_4_5_r)
+#define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r)
+#define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r)
+
+#define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2)
+
#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
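
The new cpu_has_mips_* shortcuts OR the individual ISA-level tests together so callers can ask for "MIPS IV, MIPS V, or any MIPS32/64 R2" in one expression. A hedged example of the kind of check they enable (helper name is illustrative):

#include <asm/cpu-features.h>

static inline int example_has_cop1x_ops(void)
{
	/* indexed FP loads/stores (e.g. LWXC1) exist on MIPS IV/V and R2 */
	return cpu_has_mips_4_5_r2;
}
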
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index ff2707ab3295..47d5967ce7ef 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -65,18 +65,13 @@ struct cpuinfo_mips {
#ifdef CONFIG_64BIT
int vmbits; /* Virtual memory size in bits */
#endif
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
/*
- * In the MIPS MT "SMTC" model, each TC is considered
- * to be a "CPU" for the purposes of scheduling, but
- * exception resources, ASID spaces, etc, are common
- * to all TCs within the same VPE.
+ * There is not necessarily a 1:1 mapping of VPE num to CPU number
+ * in particular on multi-core systems.
*/
int vpe_id; /* Virtual Processor number */
#endif
-#ifdef CONFIG_MIPS_MT_SMTC
- int tc_id; /* Thread Context number */
-#endif
void *data; /* Additional data */
unsigned int watch_reg_count; /* Number that exist */
unsigned int watch_reg_use_cnt; /* Usable by ptrace */
@@ -117,7 +112,7 @@ struct proc_cpuinfo_notifier_args {
unsigned long n;
};
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_MIPS_MT_SMP
# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id)
#else
# define cpu_vpe_id(cpuinfo) 0
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 721906130a57..b4e2bd87df50 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -155,9 +155,6 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_RM7000:
case CPU_SR71000:
#endif
-#ifdef CONFIG_SYS_HAS_CPU_RM9000
- case CPU_RM9000:
-#endif
#ifdef CONFIG_SYS_HAS_CPU_SB1
case CPU_SB1:
case CPU_SB1A:
@@ -166,6 +163,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
#endif
#if defined(CONFIG_SYS_HAS_CPU_BMIPS32_3300) || \
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 530eb8b3a68e..129d08701e91 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -201,6 +201,7 @@
#define PRID_IMP_NETLOGIC_XLP3XX 0x1100
#define PRID_IMP_NETLOGIC_XLP2XX 0x1200
#define PRID_IMP_NETLOGIC_XLP9XX 0x1500
+#define PRID_IMP_NETLOGIC_XLP5XX 0x1300
/*
* Particular Revision values for bits 7:0 of the PRId register.
@@ -281,7 +282,7 @@ enum cpu_type_enum {
CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,
CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122,
CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
- CPU_SR71000, CPU_RM9000, CPU_TX49XX,
+ CPU_SR71000, CPU_TX49XX,
/*
* R8000 class processors
diff --git a/arch/mips/include/asm/dec/kn05.h b/arch/mips/include/asm/dec/kn05.h
index 56d22dc8803a..8e14f677e5ef 100644
--- a/arch/mips/include/asm/dec/kn05.h
+++ b/arch/mips/include/asm/dec/kn05.h
@@ -49,12 +49,20 @@
#define KN4K_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
/*
+ * MB ASIC interrupt bits.
+ */
+#define KN4K_MB_INR_MB 4 /* ??? */
+#define KN4K_MB_INR_MT 3 /* memory, I/O bus read/write errors */
+#define KN4K_MB_INR_RES_2 2 /* unused */
+#define KN4K_MB_INR_RTC 1 /* RTC */
+#define KN4K_MB_INR_TC 0 /* I/O ASIC cascade */
+
+/*
* Bits for the MB interrupt register.
* The register appears read-only.
*/
-#define KN4K_MB_INT_TC (1<<0) /* TURBOchannel? */
-#define KN4K_MB_INT_RTC (1<<1) /* RTC? */
-#define KN4K_MB_INT_MT (1<<3) /* I/O ASIC cascade */
+#define KN4K_MB_INT_IRQ (0x1f<<0) /* CPU Int[4:0] status. */
+#define KN4K_MB_INT_IRQ_N(n) (1<<(n)) /* Individual status bits. */
/*
* Bits for the MB control & status register.
@@ -70,6 +78,7 @@
#define KN4K_MB_CSR_NC (1<<14) /* ??? */
#define KN4K_MB_CSR_EE (1<<15) /* (bus) Exception Enable? */
#define KN4K_MB_CSR_MSK (0x1f<<16) /* CPU Int[4:0] mask */
+#define KN4K_MB_CSR_MSK_N(n) (1<<((n)+16)) /* Individual mask bits. */
#define KN4K_MB_CSR_FW (1<<21) /* ??? */
#define KN4K_MB_CSR_W (1<<31) /* ??? */
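
With the per-line KN4K_MB_INR_* numbers and the KN4K_MB_INT_IRQ_N()/KN4K_MB_CSR_MSK_N() accessors, a handler can test an individual MB interrupt line. A sketch under the assumption that a set mask bit means the line is enabled:

#include <asm/dec/kn05.h>

static int example_tc_irq_pending(unsigned int mb_int, unsigned int mb_csr)
{
	return (mb_csr & KN4K_MB_CSR_MSK_N(KN4K_MB_INR_TC)) &&
	       (mb_int & KN4K_MB_INT_IRQ_N(KN4K_MB_INR_TC));
}
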
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 8c012af2f451..6842ffafd1e7 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -48,11 +48,7 @@
enum fixed_addresses {
#define FIX_N_COLOURS 8
FIX_CMAP_BEGIN,
-#ifdef CONFIG_MIPS_MT_SMTC
- FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2),
-#else
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
-#endif
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 4d86b72750c7..a939574f8293 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -17,6 +17,7 @@
#include <asm/mipsregs.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
+#include <asm/fpu_emulator.h>
#include <asm/hazards.h>
#include <asm/processor.h>
#include <asm/current.h>
@@ -28,7 +29,6 @@
struct sigcontext;
struct sigcontext32;
-extern void fpu_emulator_init_fpu(void);
extern void _init_fpu(void);
extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);
@@ -156,15 +156,16 @@ static inline int init_fpu(void)
int ret = 0;
preempt_disable();
+
if (cpu_has_fpu) {
ret = __own_fpu();
if (!ret)
_init_fpu();
- } else {
+ } else
fpu_emulator_init_fpu();
- }
preempt_enable();
+
return ret;
}
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 2abb587d5ab4..0195745b4b1b 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -23,9 +23,12 @@
#ifndef _ASM_FPU_EMULATOR_H
#define _ASM_FPU_EMULATOR_H
+#include <linux/sched.h>
#include <asm/break.h>
+#include <asm/thread_info.h>
#include <asm/inst.h>
#include <asm/local.h>
+#include <asm/processor.h>
#ifdef CONFIG_DEBUG_FS
@@ -36,6 +39,11 @@ struct mips_fpu_emulator_stats {
local_t cp1ops;
local_t cp1xops;
local_t errors;
+ local_t ieee754_inexact;
+ local_t ieee754_underflow;
+ local_t ieee754_overflow;
+ local_t ieee754_zerodiv;
+ local_t ieee754_invalidop;
};
DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
@@ -71,4 +79,17 @@ int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
*/
#define BREAK_MATH (0x0000000d | (BRK_MEMU << 16))
+#define SIGNALLING_NAN 0x7ff800007ff80000LL
+
+static inline void fpu_emulator_init_fpu(void)
+{
+ struct task_struct *t = current;
+ int i;
+
+ t->thread.fpu.fcr31 = 0;
+
+ for (i = 0; i < 32; i++)
+ set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
+}
+
#endif /* _ASM_FPU_EMULATOR_H */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 082716690589..10f6a99f92c2 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -380,6 +380,7 @@ extern unsigned int gic_compare_int (void);
extern cycle_t gic_read_count(void);
extern cycle_t gic_read_compare(void);
extern void gic_write_compare(cycle_t cnt);
+extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
extern void gic_send_ipi(unsigned int intr);
extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index 0878701712f8..4be1a57cdbb0 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -50,7 +50,7 @@ static inline void gio_device_free(struct gio_device *dev)
extern int gio_register_driver(struct gio_driver *);
extern void gio_unregister_driver(struct gio_driver *);
-#define gio_get_drvdata(_dev) drv_get_drvdata(&(_dev)->dev)
-#define gio_set_drvdata(_dev, data) drv_set_drvdata(&(_dev)->dev, (data))
+#define gio_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
+#define gio_set_drvdata(_dev, data) dev_set_drvdata(&(_dev)->dev, (data))
extern void gio_set_master(struct gio_device *);
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
index d192158886b1..d9f932de80e9 100644
--- a/arch/mips/include/asm/idle.h
+++ b/arch/mips/include/asm/idle.h
@@ -1,6 +1,7 @@
#ifndef __ASM_IDLE_H
#define __ASM_IDLE_H
+#include <linux/cpuidle.h>
#include <linux/linkage.h>
extern void (*cpu_wait)(void);
@@ -20,4 +21,17 @@ static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
addr < (unsigned long)__pastwait;
}
+extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
+
+#define MIPS_CPUIDLE_WAIT_STATE {\
+ .enter = mips_cpuidle_wait_enter,\
+ .exit_latency = 1,\
+ .target_residency = 1,\
+ .power_usage = UINT_MAX,\
+ .flags = CPUIDLE_FLAG_TIME_VALID,\
+ .name = "wait",\
+ .desc = "MIPS wait",\
+}
+
#endif /* __ASM_IDLE_H */
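
MIPS_CPUIDLE_WAIT_STATE packages the plain "wait" instruction as a cpuidle state. A minimal sketch of a platform driver using it (driver name is hypothetical):

#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/idle.h>

static struct cpuidle_driver example_idle_driver = {
	.name		= "example-idle",
	.owner		= THIS_MODULE,
	.states		= {
		MIPS_CPUIDLE_WAIT_STATE,	/* state 0: the r4k "wait" */
	},
	.state_count	= 1,
};
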
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 7bc2cdb35057..ae1f7b24dd1a 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq)
#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
#endif
-#ifdef CONFIG_MIPS_MT_SMTC
-
-struct irqaction;
-
-extern unsigned long irq_hwmask[];
-extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
- unsigned long hwmask);
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
- if (irq_hwmask[irq] & ST0_IM)
- set_c0_status(irq_hwmask[irq] & ST0_IM);
-}
-
-#else
-
-static inline void smtc_im_ack_irq(unsigned int irq)
-{
-}
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-#include <linux/cpumask.h>
-
-extern int plat_set_irq_affinity(struct irq_data *d,
- const struct cpumask *affinity, bool force);
-extern void smtc_forward_irq(struct irq_data *d);
-
-/*
- * IRQ affinity hook invoked at the beginning of interrupt dispatch
- * if option is enabled.
- *
- * Up through Linux 2.6.22 (at least) cpumask operations are very
- * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
- * used a "fast path" per-IRQ-descriptor cache of affinity information
- * to reduce latency. As there is a project afoot to optimize the
- * cpumask implementations, this version is optimistically assuming
- * that cpumask.h macro overhead is reasonable during interrupt dispatch.
- */
-static inline int handle_on_other_cpu(unsigned int irq)
-{
- struct irq_data *d = irq_get_irq_data(irq);
-
- if (cpumask_test_cpu(smp_processor_id(), d->affinity))
- return 0;
- smtc_forward_irq(d);
- return 1;
-}
-
-#else /* Not doing SMTC affinity */
-
-static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-
-static inline void smtc_im_backstop(unsigned int irq)
-{
- if (irq_hwmask[irq] & 0x0000ff00)
- write_c0_tccontext(read_c0_tccontext() &
- ~(irq_hwmask[irq] & 0x0000ff00));
-}
-
-/*
- * Clear interrupt mask handling "backstop" if irq_hwmask
- * entry so indicates. This implies that the ack() or end()
- * functions will take over re-enabling the low-level mask.
- * Otherwise it will be done on return from exception.
- */
-static inline int smtc_handle_on_other_cpu(unsigned int irq)
-{
- int ret = handle_on_other_cpu(irq);
-
- if (!ret)
- smtc_im_backstop(irq);
- return ret;
-}
-
-#else
-
-static inline void smtc_im_backstop(unsigned int irq) { }
-static inline int smtc_handle_on_other_cpu(unsigned int irq)
-{
- return handle_on_other_cpu(irq);
-}
-
-#endif
-
extern void do_IRQ(unsigned int irq);
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-
-extern void do_IRQ_no_affinity(unsigned int irq);
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 45c00951888b..0fa5fdcd1f01 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -17,7 +17,7 @@
#include <linux/stringify.h>
#include <asm/hazards.h>
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+#ifdef CONFIG_CPU_MIPSR2
static inline void arch_local_irq_disable(void)
{
@@ -118,30 +118,15 @@ void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
-#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
-
-
-extern void smtc_ipi_replay(void);
+#endif /* CONFIG_CPU_MIPSR2 */
static inline void arch_local_irq_enable(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC kernel needs to do a software replay of queued
- * IPIs, at the cost of call overhead on each local_irq_enable()
- */
- smtc_ipi_replay();
-#endif
__asm__ __volatile__(
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
- " ori $1, 0x400 \n"
- " xori $1, 0x400 \n"
- " mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#if defined(CONFIG_CPU_MIPSR2)
" ei \n"
#else
" mfc0 $1,$12 \n"
@@ -163,11 +148,7 @@ static inline unsigned long arch_local_save_flags(void)
asm __volatile__(
" .set push \n"
" .set reorder \n"
-#ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 %[flags], $2, 1 \n"
-#else
" mfc0 %[flags], $12 \n"
-#endif
" .set pop \n"
: [flags] "=r" (flags));
@@ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void)
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
- */
- return flags & 0x400;
-#else
return !(flags & 1);
-#endif
}
#endif /* #ifndef __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 060aaa6348d7..b0aa95565752 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -19,6 +19,38 @@
#include <linux/threads.h>
#include <linux/spinlock.h>
+/* MIPS KVM register ids */
+#define MIPS_CP0_32(_R, _S) \
+ (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S) \
+ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
+#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
+#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
+
#define KVM_MAX_VCPUS 1
#define KVM_USER_MEM_SLOTS 8
@@ -372,8 +404,19 @@ struct kvm_vcpu_arch {
u32 io_gpr; /* GPR used as IO source/target */
- /* Used to calibrate the virutal count register for the guest */
- int32_t host_cp0_count;
+ struct hrtimer comparecount_timer;
+ /* Count timer control KVM register */
+ uint32_t count_ctl;
+ /* Count bias from the raw time */
+ uint32_t count_bias;
+ /* Frequency of timer in Hz */
+ uint32_t count_hz;
+ /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
+ s64 count_dyn_bias;
+ /* Resume time */
+ ktime_t count_resume;
+ /* Period of timer tick in ns */
+ u64 count_period;
/* Bitmask of exceptions that are pending */
unsigned long pending_exceptions;
@@ -394,8 +437,6 @@ struct kvm_vcpu_arch {
uint32_t guest_kernel_asid[NR_CPUS];
struct mm_struct guest_kernel_mm, guest_user_mm;
- struct hrtimer comparecount_timer;
-
int last_sched_cpu;
/* WAIT executed */
@@ -410,6 +451,7 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
@@ -449,15 +491,74 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+/*
+ * Some of the guest registers may be modified asynchronously (e.g. from a
+ * hrtimer callback in hard irq context) and therefore need stronger atomicity
+ * guarantees than other registers.
+ */
+
+static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
+ unsigned long val)
+{
+ unsigned long temp;
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 \n"
+ " or %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*reg)
+ : "r" (val));
+ } while (unlikely(!temp));
+}
+
+static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
+ unsigned long val)
+{
+ unsigned long temp;
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 \n"
+ " and %0, %2 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*reg)
+ : "r" (~val));
+ } while (unlikely(!temp));
+}
+
+static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
+ unsigned long change,
+ unsigned long val)
+{
+ unsigned long temp;
+ do {
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ " " __LL "%0, %1 \n"
+ " and %0, %2 \n"
+ " or %0, %3 \n"
+ " " __SC "%0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*reg)
+ : "r" (~change), "r" (val & change));
+ } while (unlikely(!temp));
+}
+
#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
-#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
-#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+#define kvm_set_c0_guest_cause(cop0, val) \
+ _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
+#define kvm_clear_c0_guest_cause(cop0, val) \
+ _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_change_c0_guest_cause(cop0, change, val) \
-{ \
- kvm_clear_c0_guest_cause(cop0, change); \
- kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
-}
+ _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
+ change, val)
+
#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
#define kvm_change_c0_guest_ebase(cop0, change, val) \
@@ -468,29 +569,33 @@ struct kvm_vcpu_arch {
struct kvm_mips_callbacks {
- int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
- int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
- int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
- int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
- int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
- int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
- int (*handle_syscall) (struct kvm_vcpu *vcpu);
- int (*handle_res_inst) (struct kvm_vcpu *vcpu);
- int (*handle_break) (struct kvm_vcpu *vcpu);
- int (*vm_init) (struct kvm *kvm);
- int (*vcpu_init) (struct kvm_vcpu *vcpu);
- int (*vcpu_setup) (struct kvm_vcpu *vcpu);
- gpa_t(*gva_to_gpa) (gva_t gva);
- void (*queue_timer_int) (struct kvm_vcpu *vcpu);
- void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
- void (*queue_io_int) (struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq);
- void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq);
- int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
- int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
+ int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
+ int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
+ int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
+ int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
+ int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
+ int (*handle_syscall)(struct kvm_vcpu *vcpu);
+ int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+ int (*handle_break)(struct kvm_vcpu *vcpu);
+ int (*vm_init)(struct kvm *kvm);
+ int (*vcpu_init)(struct kvm_vcpu *vcpu);
+ int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+ gpa_t (*gva_to_gpa)(gva_t gva);
+ void (*queue_timer_int)(struct kvm_vcpu *vcpu);
+ void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
+ void (*queue_io_int)(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+ void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+ int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause);
+ int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause);
+ int (*get_one_reg)(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg, s64 *v);
+ int (*set_one_reg)(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg, s64 v);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -609,7 +714,16 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run);
-enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
+void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
+void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
+void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
+enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_check_privilege(unsigned long cause,
uint32_t *opc,
@@ -646,7 +760,6 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
struct kvm_vcpu *vcpu);
/* Misc */
-extern void mips32_SyncICache(unsigned long addr, unsigned long size);
extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
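
The ll/sc based _kvm_atomic_*_c0_guest_reg() helpers exist because Cause can now be modified from the hrtimer callback while the vcpu thread is also updating it. A hedged sketch of raising the guest timer interrupt pending bit safely (vcpu->arch.cop0 is the field name assumed here):

#include <linux/kvm_host.h>
#include <asm/mipsregs.h>

static void example_queue_guest_timer_int(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;	/* assumed field */

	/* atomically OR in IP7, the conventional timer interrupt bit */
	kvm_set_c0_guest_cause(cop0, CAUSEF_IP7);
}
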
diff --git a/arch/mips/include/asm/kvm_para.h b/arch/mips/include/asm/kvm_para.h
new file mode 100644
index 000000000000..5a9aa918abe6
--- /dev/null
+++ b/arch/mips/include/asm/kvm_para.h
@@ -0,0 +1,109 @@
+#ifndef _ASM_MIPS_KVM_PARA_H
+#define _ASM_MIPS_KVM_PARA_H
+
+#include <uapi/asm/kvm_para.h>
+
+#define KVM_HYPERCALL ".word 0x42000028"
+
+/*
+ * Hypercalls for KVM.
+ *
+ * Hypercall number is passed in v0.
+ * Return value will be placed in v0.
+ * Up to 3 arguments are passed in a0, a1, and a2.
+ */
+static inline unsigned long kvm_hypercall0(unsigned long num)
+{
+ register unsigned long n asm("v0");
+ register unsigned long r asm("v0");
+
+ n = num;
+ __asm__ __volatile__(
+ KVM_HYPERCALL
+ : "=r" (r) : "r" (n) : "memory"
+ );
+
+ return r;
+}
+
+static inline unsigned long kvm_hypercall1(unsigned long num,
+ unsigned long arg0)
+{
+ register unsigned long n asm("v0");
+ register unsigned long r asm("v0");
+ register unsigned long a0 asm("a0");
+
+ n = num;
+ a0 = arg0;
+ __asm__ __volatile__(
+ KVM_HYPERCALL
+ : "=r" (r) : "r" (n), "r" (a0) : "memory"
+ );
+
+ return r;
+}
+
+static inline unsigned long kvm_hypercall2(unsigned long num,
+ unsigned long arg0, unsigned long arg1)
+{
+ register unsigned long n asm("v0");
+ register unsigned long r asm("v0");
+ register unsigned long a0 asm("a0");
+ register unsigned long a1 asm("a1");
+
+ n = num;
+ a0 = arg0;
+ a1 = arg1;
+ __asm__ __volatile__(
+ KVM_HYPERCALL
+ : "=r" (r) : "r" (n), "r" (a0), "r" (a1) : "memory"
+ );
+
+ return r;
+}
+
+static inline unsigned long kvm_hypercall3(unsigned long num,
+ unsigned long arg0, unsigned long arg1, unsigned long arg2)
+{
+ register unsigned long n asm("v0");
+ register unsigned long r asm("v0");
+ register unsigned long a0 asm("a0");
+ register unsigned long a1 asm("a1");
+ register unsigned long a2 asm("a2");
+
+ n = num;
+ a0 = arg0;
+ a1 = arg1;
+ a2 = arg2;
+ __asm__ __volatile__(
+ KVM_HYPERCALL
+ : "=r" (r) : "r" (n), "r" (a0), "r" (a1), "r" (a2) : "memory"
+ );
+
+ return r;
+}
+
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#ifdef CONFIG_MIPS_PARAVIRT
+static inline bool kvm_para_available(void)
+{
+ return true;
+}
+#else
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+#endif
+
+
+#endif /* _ASM_MIPS_KVM_PARA_H */
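
The hypercall wrappers place the number in v0 and up to three arguments in a0-a2, and return whatever the hypervisor leaves in v0. A hedged usage sketch; the hypercall number 5 and the helper name are made up for illustration:

#include <linux/errno.h>
#include <linux/kvm_para.h>

static int example_notify_host(unsigned long token)
{
	if (!kvm_para_available())
		return -ENOSYS;

	return kvm_hypercall1(5 /* hypothetical hypercall number */, token);
}
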
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index 94ed063eec92..cf8022872892 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -22,7 +22,6 @@
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 0
#define cpu_has_tx39_cache 0
-#define cpu_has_fpu 0
#define cpu_has_counter 1
#define cpu_has_watch 1
#define cpu_has_divec 1
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 60fc4c347c44..cceae32a0732 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -35,6 +35,8 @@ enum octeon_irq {
OCTEON_IRQ_PCI_MSI2,
OCTEON_IRQ_PCI_MSI3,
+ OCTEON_IRQ_TWSI,
+ OCTEON_IRQ_TWSI2,
OCTEON_IRQ_RML,
OCTEON_IRQ_TIMER0,
OCTEON_IRQ_TIMER1,
diff --git a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
index 1bcb6421205e..1dfe47453ea4 100644
--- a/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
@@ -39,6 +39,10 @@
#define cpu_has_nofpuex 0
#define cpu_has_64bits 1
+#define cpu_has_mips_2 1
+#define cpu_has_mips_3 1
+#define cpu_has_mips_5 0
+
#define cpu_has_mips32r1 0
#define cpu_has_mips32r2 0
#define cpu_has_mips64r1 0
diff --git a/arch/mips/include/asm/mach-jz4740/dma.h b/arch/mips/include/asm/mach-jz4740/dma.h
index 509cd5828044..14ecc5313d2d 100644
--- a/arch/mips/include/asm/mach-jz4740/dma.h
+++ b/arch/mips/include/asm/mach-jz4740/dma.h
@@ -22,8 +22,6 @@ enum jz4740_dma_request_type {
JZ4740_DMA_TYPE_UART_RECEIVE = 21,
JZ4740_DMA_TYPE_SPI_TRANSMIT = 22,
JZ4740_DMA_TYPE_SPI_RECEIVE = 23,
- JZ4740_DMA_TYPE_AIC_TRANSMIT = 24,
- JZ4740_DMA_TYPE_AIC_RECEIVE = 25,
JZ4740_DMA_TYPE_MMC_TRANSMIT = 26,
JZ4740_DMA_TYPE_MMC_RECEIVE = 27,
JZ4740_DMA_TYPE_TCU = 28,
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 7c5e17a17849..77eeda77e73c 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -80,36 +80,6 @@
.endm
.macro kernel_entry_setup
-#ifdef CONFIG_MIPS_MT_SMTC
- mfc0 t0, CP0_CONFIG
- bgez t0, 9f
- mfc0 t0, CP0_CONFIG, 1
- bgez t0, 9f
- mfc0 t0, CP0_CONFIG, 2
- bgez t0, 9f
- mfc0 t0, CP0_CONFIG, 3
- and t0, 1<<2
- bnez t0, 0f
-9:
- /* Assume we came from YAMON... */
- PTR_LA v0, 0x9fc00534 /* YAMON print */
- lw v0, (v0)
- move a0, zero
- PTR_LA a1, nonmt_processor
- jal v0
-
- PTR_LA v0, 0x9fc00520 /* YAMON exit */
- lw v0, (v0)
- li a0, 1
- jal v0
-
-1: b 1b
-
- __INITDATA
-nonmt_processor:
- .asciz "SMTC kernel requires the MT ASE to run\n"
- __FINIT
-#endif
#ifdef CONFIG_EVA
sync
diff --git a/arch/mips/include/asm/mach-malta/malta-pm.h b/arch/mips/include/asm/mach-malta/malta-pm.h
new file mode 100644
index 000000000000..c2c2e201013d
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/malta-pm.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MIPS_MACH_MALTA_PM_H__
+#define __ASM_MIPS_MACH_MALTA_PM_H__
+
+#include <asm/mips-boards/piix4.h>
+
+#ifdef CONFIG_MIPS_MALTA_PM
+
+/**
+ * mips_pm_suspend - enter a suspend state
+ * @state: the state to enter, one of PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_*
+ *
+ * Enters a suspend state via the Malta's PIIX4. If the state to be entered
+ * is one which loses context (eg. SOFF) then this function will never
+ * return.
+ */
+extern int mips_pm_suspend(unsigned state);
+
+#else /* !CONFIG_MIPS_MALTA_PM */
+
+static inline int mips_pm_suspend(unsigned state)
+{
+ return -EINVAL;
+}
+
+#endif /* !CONFIG_MIPS_MALTA_PM */
+
+#endif /* __ASM_MIPS_MACH_MALTA_PM_H__ */
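
mips_pm_suspend() takes one of the PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_* values added to piix4.h further down in this series. A sketch of an assumed caller requesting suspend-to-RAM, which simply returns -EINVAL when CONFIG_MIPS_MALTA_PM is disabled:

#include <asm/mach-malta/malta-pm.h>

static int example_malta_suspend_to_ram(void)
{
	return mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_STR);
}
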
diff --git a/arch/mips/include/asm/mach-netlogic/topology.h b/arch/mips/include/asm/mach-netlogic/topology.h
index 0da99fa11c38..ceeb1f5e7129 100644
--- a/arch/mips/include/asm/mach-netlogic/topology.h
+++ b/arch/mips/include/asm/mach-netlogic/topology.h
@@ -10,10 +10,12 @@
#include <asm/mach-netlogic/multi-node.h>
+#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) cpu_to_node(cpu)
#define topology_core_id(cpu) (cpu_logical_map(cpu) / NLM_THREADS_PER_CORE)
#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu])
#define topology_core_cpumask(cpu) cpumask_of_node(cpu_to_node(cpu))
+#endif
#include <asm-generic/topology.h>
diff --git a/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h
new file mode 100644
index 000000000000..725e1ed83f6a
--- /dev/null
+++ b/arch/mips/include/asm/mach-paravirt/cpu-feature-overrides.h
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Cavium, Inc.
+ */
+#ifndef __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_tx39_cache 0
+#define cpu_has_counter 1
+#define cpu_has_llsc 1
+/*
+ * We Disable LL/SC on non SMP systems as it is faster to disable
+ * interrupts for atomic access than a LL/SC.
+ */
+#ifdef CONFIG_SMP
+# define kernel_uses_llsc 1
+#else
+# define kernel_uses_llsc 0
+#endif
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define cpu_dcache_line_size() 128
+#define cpu_icache_line_size() 128
+#define cpu_has_octeon_cache 1
+#define cpu_has_4k_cache 0
+#else
+#define cpu_has_octeon_cache 0
+#define cpu_has_4k_cache 1
+#endif
+
+#endif /* __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-paravirt/irq.h b/arch/mips/include/asm/mach-paravirt/irq.h
new file mode 100644
index 000000000000..9b4d35eca977
--- /dev/null
+++ b/arch/mips/include/asm/mach-paravirt/irq.h
@@ -0,0 +1,19 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Cavium, Inc.
+ */
+#ifndef __ASM_MACH_PARAVIRT_IRQ_H__
+#define __ASM_MACH_PARAVIRT_IRQ_H__
+
+#define NR_IRQS 64
+#define MIPS_CPU_IRQ_BASE 1
+
+#define MIPS_IRQ_PCIA (MIPS_CPU_IRQ_BASE + 8)
+
+#define MIPS_IRQ_MBOX0 (MIPS_CPU_IRQ_BASE + 32)
+#define MIPS_IRQ_MBOX1 (MIPS_CPU_IRQ_BASE + 33)
+
+#endif /* __ASM_MACH_PARAVIRT_IRQ_H__ */
diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
new file mode 100644
index 000000000000..2f82bfa3a773
--- /dev/null
+++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Cavium, Inc
+ */
+#ifndef __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
+#define __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
+
+#define CP0_EBASE $15, 1
+
+ .macro kernel_entry_setup
+ mfc0 t0, CP0_EBASE
+ andi t0, t0, 0x3ff # CPUNum
+ beqz t0, 1f
+ # CPUs other than zero goto smp_bootstrap
+ j smp_bootstrap
+
+1:
+ .endm
+
+/*
+ * Do SMP slave processor setup necessary before we can safely execute
+ * C code.
+ */
+ .macro smp_slave_setup
+ mfc0 t0, CP0_EBASE
+ andi t0, t0, 0x3ff # CPUNum
+ slti t1, t0, NR_CPUS
+ bnez t1, 1f
+2:
+ di
+ wait
+ b 2b # Unknown CPU, loop forever.
+1:
+ PTR_LA t1, paravirt_smp_sp
+ PTR_SLL t0, PTR_SCALESHIFT
+ PTR_ADDU t1, t1, t0
+3:
+ PTR_L sp, 0(t1)
+ beqz sp, 3b # Spin until told to proceed.
+
+ PTR_LA t1, paravirt_smp_gp
+ PTR_ADDU t1, t1, t0
+ sync
+ PTR_L gp, 0(t1)
+ .endm
+
+#endif /* __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-paravirt/war.h b/arch/mips/include/asm/mach-paravirt/war.h
new file mode 100644
index 000000000000..36d3afb98451
--- /dev/null
+++ b/arch/mips/include/asm/mach-paravirt/war.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2013 Cavium Networks <support@caviumnetworks.com>
+ */
+#ifndef __ASM_MIPS_MACH_PARAVIRT_WAR_H
+#define __ASM_MIPS_MACH_PARAVIRT_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+
+#endif /* __ASM_MIPS_MACH_PARAVIRT_WAR_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
index aa45e6a07126..fe1566f2913e 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
@@ -25,11 +25,7 @@
#ifndef MSP_USB_H_
#define MSP_USB_H_
-#ifdef CONFIG_MSP_HAS_DUAL_USB
-#define NUM_USB_DEVS 2
-#else
#define NUM_USB_DEVS 1
-#endif
/* Register spaces for USB host 0 */
#define MSP_USB0_MAB_START (MSP_USB0_BASE + 0x0)
diff --git a/arch/mips/include/asm/mach-ralink/war.h b/arch/mips/include/asm/mach-ralink/war.h
index a7b712cf2d28..c074b5dc1f82 100644
--- a/arch/mips/include/asm/mach-ralink/war.h
+++ b/arch/mips/include/asm/mach-ralink/war.h
@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
index 3dfbd8e7947f..6cccd4d558d7 100644
--- a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
@@ -10,37 +10,6 @@
#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
.macro kernel_entry_setup
-#ifdef CONFIG_MIPS_MT_SMTC
- mfc0 t0, CP0_CONFIG
- bgez t0, 9f
- mfc0 t0, CP0_CONFIG, 1
- bgez t0, 9f
- mfc0 t0, CP0_CONFIG, 2
- bgez t0, 9f
- mfc0 t0, CP0_CONFIG, 3
- and t0, 1<<2
- bnez t0, 0f
-9 :
- /* Assume we came from YAMON... */
- PTR_LA v0, 0x9fc00534 /* YAMON print */
- lw v0, (v0)
- move a0, zero
- PTR_LA a1, nonmt_processor
- jal v0
-
- PTR_LA v0, 0x9fc00520 /* YAMON exit */
- lw v0, (v0)
- li a0, 1
- jal v0
-
-1 : b 1b
-
- __INITDATA
-nonmt_processor :
- .asciz "SMTC kernel requires the MT ASE to run\n"
- __FINIT
-0 :
-#endif
.endm
/*
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 48616816bcbc..c904c24550f6 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -67,10 +67,6 @@
extern int mips_revision_sconid;
-#ifdef CONFIG_OF
-extern struct boot_param_header __dtb_start;
-#endif
-
#ifdef CONFIG_PCI
extern void mips_pcibios_init(void);
#else
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index 9cf54041d416..9e340be52a50 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -55,4 +55,16 @@
#define PIIX4_FUNC3_PMREGMISC 0x80
#define PIIX4_FUNC3_PMREGMISC_EN (1 << 0)
+/* Power Management IO Space */
+#define PIIX4_FUNC3IO_PMSTS 0x00
+#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS (1 << 8)
+#define PIIX4_FUNC3IO_PMCNTRL 0x04
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN (1 << 13)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP (0x7 << 10)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF (0x0 << 10)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_STR (0x1 << 10)
+
+/* Data for magic special PCI cycle */
+#define PIIX4_SUSPEND_MAGIC 0x00120002
+
#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index 988507e46d42..e139a534e0fd 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -72,7 +72,12 @@ static inline bool mips_cpc_present(void)
#define MIPS_CPC_COCB_OFS 0x4000
/* Macros to ease the creation of register access functions */
-#define BUILD_CPC_R_(name, off) \
+#define BUILD_CPC_R_(name, off) \
+static inline u32 *addr_cpc_##name(void) \
+{ \
+ return (u32 *)(mips_cpc_base + (off)); \
+} \
+ \
static inline u32 read_cpc_##name(void) \
{ \
return __raw_readl(mips_cpc_base + (off)); \
@@ -147,4 +152,31 @@ BUILD_CPC_Cx_RW(other, 0x10)
#define CPC_Cx_OTHER_CORENUM_SHF 16
#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16)
+#ifdef CONFIG_MIPS_CPC
+
+/**
+ * mips_cpc_lock_other - lock access to another core
+ * core: the other core to be accessed
+ *
+ * Call before operating upon a core via the 'other' register region in
+ * order to prevent the region being moved during access. Must be followed
+ * by a call to mips_cpc_unlock_other.
+ */
+extern void mips_cpc_lock_other(unsigned int core);
+
+/**
+ * mips_cpc_unlock_other - unlock access to another core
+ *
+ * Call after operating upon another core via the 'other' register region.
+ * Must be called after mips_cpc_lock_other.
+ */
+extern void mips_cpc_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CPC */
+
+static inline void mips_cpc_lock_other(unsigned int core) { }
+static inline void mips_cpc_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CPC */
+
#endif /* __MIPS_ASM_MIPS_CPC_H__ */
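
mips_cpc_lock_other()/mips_cpc_unlock_other() bracket any access through the CPC "other" register region so the targeted core cannot change underneath the caller. A hedged sketch; write_cpc_co_cmd() is the core-other command accessor assumed to be generated by the BUILD_CPC_* macros elsewhere in this header:

#include <asm/mips-cpc.h>

static void example_cpc_send_cmd(unsigned int core, u32 cmd)
{
	mips_cpc_lock_other(core);	/* pin the 'other' region to 'core' */
	write_cpc_co_cmd(cmd);
	mips_cpc_unlock_other();
}
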
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index a3df0c3faa0e..f6ba004a7711 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -1,7 +1,6 @@
/*
- * Definitions and decalrations for MIPS MT support
- * that are common between SMTC, VSMP, and/or AP/SP
- * kernel models.
+ * Definitions and decalrations for MIPS MT support that are common between
+ * the VSMP, and AP/SP kernel models.
*/
#ifndef __ASM_MIPS_MT_H
#define __ASM_MIPS_MT_H
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 6efa79a27b6a..5f8052ce43bf 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -36,6 +36,8 @@
#define read_c0_tcbind() __read_32bit_c0_register($2, 2)
+#define write_c0_tchalt(val) __write_32bit_c0_register($2, 4, val)
+
#define read_c0_tccontext() __read_32bit_c0_register($2, 5)
#define write_c0_tccontext(val) __write_32bit_c0_register($2, 5, val)
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 3e025b5311db..98e9754a4b6b 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -709,11 +709,18 @@
#ifndef __ASSEMBLY__
/*
- * Macros for handling the ISA mode bit for microMIPS.
+ * Macros for handling the ISA mode bit for MIPS16 and microMIPS.
*/
+#if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \
+ defined(CONFIG_SYS_SUPPORTS_MICROMIPS)
#define get_isa16_mode(x) ((x) & 0x1)
#define msk_isa16_mode(x) ((x) & ~0x1)
#define set_isa16_mode(x) do { (x) |= 0x1; } while(0)
+#else
+#define get_isa16_mode(x) 0
+#define msk_isa16_mode(x) (x)
+#define set_isa16_mode(x) do { } while(0)
+#endif
/*
* microMIPS instructions can be 16-bit or 32-bit in length. This
@@ -1007,19 +1014,8 @@ do { \
#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val)
#define read_c0_status() __read_32bit_c0_register($12, 0)
-#ifdef CONFIG_MIPS_MT_SMTC
-#define write_c0_status(val) \
-do { \
- __write_32bit_c0_register($12, 0, val); \
- __ehb(); \
-} while (0)
-#else
-/*
- * Legacy non-SMTC code, which may be hazardous
- * but which might not support EHB
- */
+
#define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
-#endif /* CONFIG_MIPS_MT_SMTC */
#define read_c0_cause() __read_32bit_c0_register($13, 0)
#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
@@ -1743,11 +1739,6 @@ static inline void tlb_write_random(void)
/*
* Manipulate bits in a c0 register.
*/
-#ifndef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Linux requires shutting-down microthread scheduling
- * during CP0 register read-modify-write sequences.
- */
#define __BUILD_SET_C0(name) \
static inline unsigned int \
set_c0_##name(unsigned int set) \
@@ -1786,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val) \
return res; \
}
-#else /* SMTC versions that manage MT scheduling */
-
-#include <linux/irqflags.h>
-
-/*
- * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
- * header file recursion.
- */
-static inline unsigned int __dmt(void)
-{
- int res;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set mips32r2 \n"
- " .set noat \n"
- " .word 0x41610BC1 # dmt $1 \n"
- " ehb \n"
- " move %0, $1 \n"
- " .set pop \n"
- : "=r" (res));
-
- instruction_hazard();
-
- return res;
-}
-
-#define __VPECONTROL_TE_SHIFT 15
-#define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT)
-
-#define __EMT_ENABLE __VPECONTROL_TE
-
-static inline void __emt(unsigned int previous)
-{
- if ((previous & __EMT_ENABLE))
- __asm__ __volatile__(
- " .set mips32r2 \n"
- " .word 0x41600be1 # emt \n"
- " ehb \n"
- " .set mips0 \n");
-}
-
-static inline void __ehb(void)
-{
- __asm__ __volatile__(
- " .set mips32r2 \n"
- " ehb \n" " .set mips0 \n");
-}
-
-/*
- * Note that local_irq_save/restore affect TC-specific IXMT state,
- * not Status.IE as in non-SMTC kernel.
- */
-
-#define __BUILD_SET_C0(name) \
-static inline unsigned int \
-set_c0_##name(unsigned int set) \
-{ \
- unsigned int res; \
- unsigned int new; \
- unsigned int omt; \
- unsigned long flags; \
- \
- local_irq_save(flags); \
- omt = __dmt(); \
- res = read_c0_##name(); \
- new = res | set; \
- write_c0_##name(new); \
- __emt(omt); \
- local_irq_restore(flags); \
- \
- return res; \
-} \
- \
-static inline unsigned int \
-clear_c0_##name(unsigned int clear) \
-{ \
- unsigned int res; \
- unsigned int new; \
- unsigned int omt; \
- unsigned long flags; \
- \
- local_irq_save(flags); \
- omt = __dmt(); \
- res = read_c0_##name(); \
- new = res & ~clear; \
- write_c0_##name(new); \
- __emt(omt); \
- local_irq_restore(flags); \
- \
- return res; \
-} \
- \
-static inline unsigned int \
-change_c0_##name(unsigned int change, unsigned int newbits) \
-{ \
- unsigned int res; \
- unsigned int new; \
- unsigned int omt; \
- unsigned long flags; \
- \
- local_irq_save(flags); \
- \
- omt = __dmt(); \
- res = read_c0_##name(); \
- new = res & ~change; \
- new |= (newbits & change); \
- write_c0_##name(new); \
- __emt(omt); \
- local_irq_restore(flags); \
- \
- return res; \
-}
-#endif
-
__BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
@@ -1916,6 +1792,15 @@ __BUILD_SET_C0(brcm_cmt_ctrl)
__BUILD_SET_C0(brcm_config)
__BUILD_SET_C0(brcm_mode)
+/*
+ * Return low 10 bits of ebase.
+ * Note that under KVM (MIPSVZ) this returns vcpu id.
+ */
+static inline unsigned int get_ebase_cpunum(void)
+{
+ return read_c0_ebase() & 0x3ff;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_MIPSREGS_H */
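
The get_isa16_mode()/msk_isa16_mode() fallbacks added above let callers handle EPC values the same way whether or not a 16-bit ISA is configured. A minimal sketch of the usual pattern, assuming a struct pt_regs pointer and a user-mode EPC; the helper name is illustrative and not part of this patch:

#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/mipsregs.h>
#include <asm/ptrace.h>

/* Sketch only: fetch the instruction word at EPC, honouring the ISA bit. */
static int fetch_epc_insn(struct pt_regs *regs, unsigned int *out)
{
	unsigned long addr = msk_isa16_mode(regs->cp0_epc);

	if (get_isa16_mode(regs->cp0_epc)) {
		unsigned short half;

		/* MIPS16e/microMIPS: only halfword alignment is guaranteed */
		if (__get_user(half, (unsigned short __user *)addr))
			return -EFAULT;
		*out = half;
		return 0;
	}

	if (__get_user(*out, (unsigned int __user *)addr))
		return -EFAULT;

	return 0;
}

When neither CONFIG_SYS_SUPPORTS_MIPS16 nor CONFIG_SYS_SUPPORTS_MICROMIPS is set, get_isa16_mode() is the constant 0 and the halfword path is discarded at compile time.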
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index e277bbad2871..2e373da5f8e9 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -18,10 +18,6 @@
#include <asm/cacheflush.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#include <asm/smtc.h>
-#endif /* SMTC */
#include <asm-generic/mm_hooks.h>
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
@@ -31,11 +27,15 @@ do { \
} while (0)
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+
+#define TLBMISS_HANDLER_RESTORE() \
+ write_c0_xcontext((unsigned long) smp_processor_id() << \
+ SMP_CPUID_REGSHIFT)
+
#define TLBMISS_HANDLER_SETUP() \
do { \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
- write_c0_xcontext((unsigned long) smp_processor_id() << \
- SMP_CPUID_REGSHIFT); \
+ TLBMISS_HANDLER_RESTORE(); \
} while (0)
#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
@@ -47,9 +47,12 @@ do { \
*/
extern unsigned long pgd_current[];
-#define TLBMISS_HANDLER_SETUP() \
+#define TLBMISS_HANDLER_RESTORE() \
write_c0_context((unsigned long) smp_processor_id() << \
- SMP_CPUID_REGSHIFT); \
+ SMP_CPUID_REGSHIFT)
+
+#define TLBMISS_HANDLER_SETUP() \
+ TLBMISS_HANDLER_RESTORE(); \
back_to_back_c0_hazard(); \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
@@ -63,13 +66,6 @@ extern unsigned long pgd_current[];
#define ASID_INC 0x10
#define ASID_MASK 0xff0
-#elif defined(CONFIG_MIPS_MT_SMTC)
-
-#define ASID_INC 0x1
-extern unsigned long smtc_asid_mask;
-#define ASID_MASK (smtc_asid_mask)
-#define HW_ASID_MASK 0xff
-/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */
#define ASID_INC 0x1
@@ -92,7 +88,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
-#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
@@ -115,12 +110,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
-#else /* CONFIG_MIPS_MT_SMTC */
-
-#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
/*
* Initialize the context related info for a new mm_struct
* instance.
@@ -141,46 +130,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
unsigned long flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long oldasid;
- unsigned long mtflags;
- int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
local_irq_save(flags);
- mtflags = dvpe();
-#else /* Not SMTC */
- local_irq_save(flags);
-#endif /* CONFIG_MIPS_MT_SMTC */
/* Check if our ASID is of an older version and thus invalid */
if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
get_new_mmu_context(next, cpu);
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * If the EntryHi ASID being replaced happens to be
- * the value flagged at ASID recycling time as having
- * an extended life, clear the bit showing it being
- * in use by this "CPU", and if that's the last bit,
- * free up the ASID value for use and flush any old
- * instances of it from the TLB.
- */
- oldasid = (read_c0_entryhi() & ASID_MASK);
- if(smtc_live_asid[mytlb][oldasid]) {
- smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
- if(smtc_live_asid[mytlb][oldasid] == 0)
- smtc_flush_tlb_asid(oldasid);
- }
- /*
- * Tread softly on EntryHi, and so long as we support
- * having ASID_MASK smaller than the hardware maximum,
- * make sure no "soft" bits become "hard"...
- */
- write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
- cpu_asid(cpu, next));
- ehb(); /* Make sure it propagates to TCStatus */
- evpe(mtflags);
-#else
write_c0_entryhi(cpu_asid(cpu, next));
-#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/*
@@ -213,34 +168,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
unsigned long flags;
unsigned int cpu = smp_processor_id();
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long oldasid;
- unsigned long mtflags;
- int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
local_irq_save(flags);
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, cpu);
-#ifdef CONFIG_MIPS_MT_SMTC
- /* See comments for similar code above */
- mtflags = dvpe();
- oldasid = read_c0_entryhi() & ASID_MASK;
- if(smtc_live_asid[mytlb][oldasid]) {
- smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
- if(smtc_live_asid[mytlb][oldasid] == 0)
- smtc_flush_tlb_asid(oldasid);
- }
- /* See comments for similar code above */
- write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
- cpu_asid(cpu, next));
- ehb(); /* Make sure it propagates to TCStatus */
- evpe(mtflags);
-#else
write_c0_entryhi(cpu_asid(cpu, next));
-#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* mark mmu ownership change */
@@ -258,48 +191,15 @@ static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
unsigned long flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long oldasid;
- /* Can't use spinlock because called from TLB flush within DVPE */
- unsigned int prevvpe;
- int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
-#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
-#ifdef CONFIG_MIPS_MT_SMTC
- /* See comments for similar code above */
- prevvpe = dvpe();
- oldasid = (read_c0_entryhi() & ASID_MASK);
- if (smtc_live_asid[mytlb][oldasid]) {
- smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
- if(smtc_live_asid[mytlb][oldasid] == 0)
- smtc_flush_tlb_asid(oldasid);
- }
- /* See comments for similar code above */
- write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
- | cpu_asid(cpu, mm));
- ehb(); /* Make sure it propagates to TCStatus */
- evpe(prevvpe);
-#else /* not CONFIG_MIPS_MT_SMTC */
write_c0_entryhi(cpu_asid(cpu, mm));
-#endif /* CONFIG_MIPS_MT_SMTC */
} else {
/* will get a new context next time */
-#ifndef CONFIG_MIPS_MT_SMTC
cpu_context(cpu, mm) = 0;
-#else /* SMTC */
- int i;
-
- /* SMTC shares the TLB (and ASIDs) across VPEs */
- for_each_online_cpu(i) {
- if((smtc_status & SMTC_TLB_SHARED)
- || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
- cpu_context(i, mm) = 0;
- }
-#endif /* CONFIG_MIPS_MT_SMTC */
}
local_irq_restore(flags);
}
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index c2edae382d5d..800fe578dc99 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr)
#define MODULE_KERNEL_TYPE "64BIT "
#endif
-#ifdef CONFIG_MIPS_MT_SMTC
-#define MODULE_KERNEL_SMTC "MT_SMTC "
-#else
-#define MODULE_KERNEL_SMTC ""
-#endif
-
#define MODULE_ARCH_VERMAGIC \
- MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC
+ MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
#endif /* _ASM_MODULE_H */
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index a2aba6c3ec05..538f6d482db8 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -84,7 +84,7 @@ static inline void write_msa_##name(unsigned int val) \
__asm__ __volatile__( \
" .set push\n" \
" .set msa\n" \
- " cfcmsa $" #cs ", %0\n" \
+ " ctcmsa $" #cs ", %0\n" \
" .set pop\n" \
: : "r"(val)); \
}
@@ -96,6 +96,13 @@ static inline void write_msa_##name(unsigned int val) \
* allow compilation with toolchains that do not support MSA. Once all
* toolchains in use support MSA these can be removed.
*/
+#ifdef CONFIG_CPU_MICROMIPS
+#define CFC_MSA_INSN 0x587e0056
+#define CTC_MSA_INSN 0x583e0816
+#else
+#define CFC_MSA_INSN 0x787e0059
+#define CTC_MSA_INSN 0x783e0819
+#endif
#define __BUILD_MSA_CTL_REG(name, cs) \
static inline unsigned int read_msa_##name(void) \
@@ -104,7 +111,8 @@ static inline unsigned int read_msa_##name(void) \
__asm__ __volatile__( \
" .set push\n" \
" .set noat\n" \
- " .word 0x787e0059 | (" #cs " << 11)\n" \
+ " .insn\n" \
+ " .word #CFC_MSA_INSN | (" #cs " << 11)\n" \
" move %0, $1\n" \
" .set pop\n" \
: "=r"(reg)); \
@@ -117,7 +125,8 @@ static inline void write_msa_##name(unsigned int val) \
" .set push\n" \
" .set noat\n" \
" move $1, %0\n" \
- " .word 0x783e0819 | (" #cs " << 6)\n" \
+ " .insn\n" \
+ " .word #CTC_MSA_INSN | (" #cs " << 6)\n" \
" .set pop\n" \
: : "r"(val)); \
}
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index de9aada6f4c1..06f1f75bfa9b 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -146,9 +146,10 @@ static inline int hard_smp_processor_id(void)
static inline int nlm_nodeid(void)
{
- uint32_t prid = read_c0_prid();
+ uint32_t prid = read_c0_prid() & PRID_IMP_MASK;
- if ((prid & 0xff00) == PRID_IMP_NETLOGIC_XLP9XX)
+ if ((prid == PRID_IMP_NETLOGIC_XLP9XX) ||
+ (prid == PRID_IMP_NETLOGIC_XLP5XX))
return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
else
return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
index 1f23dfaa7167..805bfd21f33e 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
@@ -74,6 +74,8 @@
#define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4)
#define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5)
+#define XLP_IO_SATA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 2)
+
/* XLP2xx has an updated USB block */
#define XLP2XX_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 4, i)
#define XLP2XX_IO_USB_XHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 1)
@@ -103,13 +105,11 @@
#define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5)
#define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6)
+/* Flash */
#define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0)
#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1)
#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2)
-/* SD flash */
-#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
-#define XLP_IO_MMC_OFFSET(node, slot) \
- ((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
+#define XLP_IO_MMC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
/* Things have changed drastically in XLP 9XX */
#define XLP9XX_HDR_OFFSET(n, d, f) \
@@ -120,6 +120,8 @@
#define XLP9XX_IO_UART_OFFSET(node) XLP9XX_HDR_OFFSET(node, 2, 2)
#define XLP9XX_IO_SYS_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 0)
#define XLP9XX_IO_FUSE_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 1)
+#define XLP9XX_IO_CLOCK_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 2)
+#define XLP9XX_IO_POWER_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 3)
#define XLP9XX_IO_JTAG_OFFSET(node) XLP9XX_HDR_OFFSET(node, 6, 4)
#define XLP9XX_IO_PCIE_OFFSET(node, i) XLP9XX_HDR_OFFSET(node, 1, i)
@@ -135,11 +137,11 @@
/* XLP9XX on-chip SATA controller */
#define XLP9XX_IO_SATA_OFFSET(node) XLP9XX_HDR_OFFSET(node, 3, 2)
+/* Flash */
#define XLP9XX_IO_NOR_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 0)
#define XLP9XX_IO_NAND_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 1)
#define XLP9XX_IO_SPI_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 2)
-/* SD flash */
-#define XLP9XX_IO_MMCSD_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3)
+#define XLP9XX_IO_MMC_OFFSET(node) XLP9XX_HDR_OFFSET(node, 7, 3)
/* PCI config header register id's */
#define XLP_PCI_CFGREG0 0x00
@@ -186,8 +188,10 @@
#define PCI_DEVICE_ID_NLM_NOR 0x1015
#define PCI_DEVICE_ID_NLM_NAND 0x1016
#define PCI_DEVICE_ID_NLM_MMC 0x1018
-#define PCI_DEVICE_ID_NLM_XHCI 0x101d
+#define PCI_DEVICE_ID_NLM_SATA 0x101A
+#define PCI_DEVICE_ID_NLM_XHCI 0x101D
+#define PCI_DEVICE_ID_XLP9XX_MMC 0x9018
#define PCI_DEVICE_ID_XLP9XX_SATA 0x901A
#define PCI_DEVICE_ID_XLP9XX_XHCI 0x901D
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
index d4deb87ad069..91540f41e1e4 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
@@ -69,6 +69,20 @@
#define PCIE_9XX_BYTE_SWAP_IO_BASE 0x25e
#define PCIE_9XX_BYTE_SWAP_IO_LIM 0x25f
+#define PCIE_9XX_BRIDGE_MSIX_ADDR_BASE 0x264
+#define PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT 0x265
+#define PCIE_9XX_MSI_STATUS 0x283
+#define PCIE_9XX_MSI_EN 0x284
+/* 128 MSIX vectors available in 9xx */
+#define PCIE_9XX_MSIX_STATUS0 0x286
+#define PCIE_9XX_MSIX_STATUSX(n) (n + 0x286)
+#define PCIE_9XX_MSIX_VEC 0x296
+#define PCIE_9XX_MSIX_VECX(n) (n + 0x296)
+#define PCIE_9XX_INT_STATUS0 0x397
+#define PCIE_9XX_INT_STATUS1 0x398
+#define PCIE_9XX_INT_EN0 0x399
+#define PCIE_9XX_INT_EN1 0x39a
+
/* other */
#define PCIE_NLINKS 4
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index f10bf3bba58f..41cefe94f0c9 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -199,6 +199,10 @@
#define PIC_IRT_PCIE_LINK_3_INDEX 81
#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
+#define PIC_9XX_IRT_PCIE_LINK_0_INDEX 191
+#define PIC_9XX_IRT_PCIE_LINK_INDEX(num) \
+ ((num) + PIC_9XX_IRT_PCIE_LINK_0_INDEX)
+
#define PIC_CLOCK_TIMER 7
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
index d9b107ffca93..bc7bddf25be9 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/sys.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
@@ -118,6 +118,10 @@
#define SYS_SCRTCH3 0x4c
/* PLL registers XLP2XX */
+#define SYS_CPU_PLL_CTRL0(core) (0x1c0 + (core * 4))
+#define SYS_CPU_PLL_CTRL1(core) (0x1c1 + (core * 4))
+#define SYS_CPU_PLL_CTRL2(core) (0x1c2 + (core * 4))
+#define SYS_CPU_PLL_CTRL3(core) (0x1c3 + (core * 4))
#define SYS_PLL_CTRL0 0x240
#define SYS_PLL_CTRL1 0x241
#define SYS_PLL_CTRL2 0x242
@@ -147,6 +151,32 @@
#define SYS_SYS_PLL_MEM_REQ 0x2a3
#define SYS_PLL_MEM_STAT 0x2a4
+/* PLL registers XLP9XX */
+#define SYS_9XX_CPU_PLL_CTRL0(core) (0xc0 + (core * 4))
+#define SYS_9XX_CPU_PLL_CTRL1(core) (0xc1 + (core * 4))
+#define SYS_9XX_CPU_PLL_CTRL2(core) (0xc2 + (core * 4))
+#define SYS_9XX_CPU_PLL_CTRL3(core) (0xc3 + (core * 4))
+#define SYS_9XX_DMC_PLL_CTRL0 0x140
+#define SYS_9XX_DMC_PLL_CTRL1 0x141
+#define SYS_9XX_DMC_PLL_CTRL2 0x142
+#define SYS_9XX_DMC_PLL_CTRL3 0x143
+#define SYS_9XX_PLL_CTRL0 0x144
+#define SYS_9XX_PLL_CTRL1 0x145
+#define SYS_9XX_PLL_CTRL2 0x146
+#define SYS_9XX_PLL_CTRL3 0x147
+
+#define SYS_9XX_PLL_CTRL0_DEVX(x) (0x148 + (x) * 4)
+#define SYS_9XX_PLL_CTRL1_DEVX(x) (0x149 + (x) * 4)
+#define SYS_9XX_PLL_CTRL2_DEVX(x) (0x14a + (x) * 4)
+#define SYS_9XX_PLL_CTRL3_DEVX(x) (0x14b + (x) * 4)
+
+#define SYS_9XX_CPU_PLL_CHG_CTRL 0x188
+#define SYS_9XX_PLL_CHG_CTRL 0x189
+#define SYS_9XX_CLK_DEV_DIS 0x18a
+#define SYS_9XX_CLK_DEV_SEL 0x18b
+#define SYS_9XX_CLK_DEV_DIV 0x18d
+#define SYS_9XX_CLK_DEV_CHG 0x18f
+
/* Registers changed on 9XX */
#define SYS_9XX_POWER_ON_RESET_CFG 0x00
#define SYS_9XX_CHIP_RESET 0x01
@@ -170,6 +200,11 @@
#define nlm_get_fuse_regbase(node) \
(nlm_get_fuse_pcibase(node) + XLP_IO_PCI_HDRSZ)
+#define nlm_get_clock_pcibase(node) \
+ nlm_pcicfg_base(XLP9XX_IO_CLOCK_OFFSET(node))
+#define nlm_get_clock_regbase(node) \
+ (nlm_get_clock_pcibase(node) + XLP_IO_PCI_HDRSZ)
+
unsigned int nlm_get_pic_frequency(int node);
#endif
#endif
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
index 2b0c9599ebe5..a862b93223cc 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
@@ -58,6 +58,10 @@
#define PIC_I2C_1_IRQ 31
#define PIC_I2C_2_IRQ 32
#define PIC_I2C_3_IRQ 33
+#define PIC_SPI_IRQ 34
+#define PIC_NAND_IRQ 37
+#define PIC_SATA_IRQ 38
+#define PIC_GPIO_IRQ 39
#define PIC_PCIE_LINK_MSI_IRQ_BASE 44 /* 44 - 47 MSI IRQ */
#define PIC_PCIE_LINK_MSI_IRQ(i) (44 + (i))
@@ -66,8 +70,9 @@
#define PIC_PCIE_MSIX_IRQ_BASE 48 /* 48 - 51 MSI-X IRQ */
#define PIC_PCIE_MSIX_IRQ(i) (48 + (i))
-#define NLM_MSIX_VEC_BASE 96 /* 96 - 127 - MSIX mapped */
-#define NLM_MSI_VEC_BASE 128 /* 128 -255 - MSI mapped */
+/* XLP9xx and XLP8xx have 128 and 32 MSIX vectors respectively */
+#define NLM_MSIX_VEC_BASE 96 /* 96 - 223 - MSIX mapped */
+#define NLM_MSI_VEC_BASE 224 /* 224 - 351 - MSI mapped */
#define NLM_PIC_INDIRECT_VEC_BASE 512
#define NLM_GPIO_VEC_BASE 768
@@ -95,17 +100,19 @@ void *xlp_dt_init(void *fdtp);
static inline int cpu_is_xlpii(void)
{
- int chip = read_c0_prid() & 0xff00;
+ int chip = read_c0_prid() & PRID_IMP_MASK;
return chip == PRID_IMP_NETLOGIC_XLP2XX ||
- chip == PRID_IMP_NETLOGIC_XLP9XX;
+ chip == PRID_IMP_NETLOGIC_XLP9XX ||
+ chip == PRID_IMP_NETLOGIC_XLP5XX;
}
static inline int cpu_is_xlp9xx(void)
{
- int chip = read_c0_prid() & 0xff00;
+ int chip = read_c0_prid() & PRID_IMP_MASK;
- return chip == PRID_IMP_NETLOGIC_XLP9XX;
+ return chip == PRID_IMP_NETLOGIC_XLP9XX ||
+ chip == PRID_IMP_NETLOGIC_XLP5XX;
}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_NLM_XLP_H */
diff --git a/arch/mips/include/asm/nile4.h b/arch/mips/include/asm/nile4.h
index 2e2436d0e94e..99e97f8bfbca 100644
--- a/arch/mips/include/asm/nile4.h
+++ b/arch/mips/include/asm/nile4.h
@@ -1,7 +1,7 @@
/*
* asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions
*
- * Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com>
+ * Copyright (C) 2000 Geert Uytterhoeven <geert@linux-m68k.org>
* Sony Software Development Center Europe (SDCE), Brussels
*
* This file is based on the following documentation:
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index f5d77b91537f..d781f9e66884 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -211,7 +211,6 @@ union octeon_cvmemctl {
extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void);
-extern int octeon_get_boot_debug_flag(void);
extern int octeon_get_boot_uart(void);
struct uart_port;
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 12d6842962be..974b0e308963 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -73,11 +73,6 @@ extern unsigned long PCIBIOS_MIN_MEM;
extern void pcibios_set_master(struct pci_dev *dev);
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 008324d1c261..539ddd148bbb 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -32,6 +32,8 @@ struct vm_area_struct;
_page_cachable_default)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _page_cachable_default)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+ _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
diff --git a/arch/mips/include/asm/pm-cps.h b/arch/mips/include/asm/pm-cps.h
new file mode 100644
index 000000000000..625eda53d571
--- /dev/null
+++ b/arch/mips/include/asm/pm-cps.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_PM_CPS_H__
+#define __MIPS_ASM_PM_CPS_H__
+
+/*
+ * The CM & CPC can only handle coherence & power control on a per-core basis,
+ * thus in an MT system the VPEs within each core are coupled and can only
+ * enter or exit states requiring CM or CPC assistance in unison.
+ */
+#ifdef CONFIG_MIPS_MT
+# define coupled_coherence cpu_has_mipsmt
+#else
+# define coupled_coherence 0
+#endif
+
+/* Enumeration of possible PM states */
+enum cps_pm_state {
+ CPS_PM_NC_WAIT, /* MIPS wait instruction, non-coherent */
+ CPS_PM_CLOCK_GATED, /* Core clock gated */
+ CPS_PM_POWER_GATED, /* Core power gated */
+ CPS_PM_STATE_COUNT,
+};
+
+/**
+ * cps_pm_support_state - determine whether the system supports a PM state
+ * @state: the state to test for support
+ *
+ * Returns true if the system supports the given state, otherwise false.
+ */
+extern bool cps_pm_support_state(enum cps_pm_state state);
+
+/**
+ * cps_pm_enter_state - enter a PM state
+ * @state: the state to enter
+ *
+ * Enter the given PM state. If coupled_coherence is non-zero then it is
+ * expected that this function be called at approximately the same time on
+ * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno.
+ */
+extern int cps_pm_enter_state(enum cps_pm_state state);
+
+#endif /* __MIPS_ASM_PM_CPS_H__ */
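
For orientation, a sketch of how platform idle code might sit on top of the interface above; the selection policy and function name are illustrative, not part of this header:

#include <asm/pm-cps.h>

/*
 * Pick the deepest state the system reports support for, then enter it.
 * Under coupled_coherence each VPE of a core is expected to call this at
 * roughly the same time.
 */
static int cps_enter_deepest_state(void)
{
	enum cps_pm_state state = CPS_PM_NC_WAIT;

	if (cps_pm_support_state(CPS_PM_POWER_GATED))
		state = CPS_PM_POWER_GATED;
	else if (cps_pm_support_state(CPS_PM_CLOCK_GATED))
		state = CPS_PM_CLOCK_GATED;

	return cps_pm_enter_state(state);	/* 0 on success, else -errno */
}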
diff --git a/arch/mips/include/asm/pm.h b/arch/mips/include/asm/pm.h
new file mode 100644
index 000000000000..7c03469e043f
--- /dev/null
+++ b/arch/mips/include/asm/pm.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * PM helper macros for CPU power off (e.g. Suspend-to-RAM).
+ */
+
+#ifndef __ASM_PM_H
+#define __ASM_PM_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm-offsets.h>
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+/* Save CPU state to stack for suspend to RAM */
+.macro SUSPEND_SAVE_REGS
+ subu sp, PT_SIZE
+ /* Call preserved GPRs */
+ LONG_S $16, PT_R16(sp)
+ LONG_S $17, PT_R17(sp)
+ LONG_S $18, PT_R18(sp)
+ LONG_S $19, PT_R19(sp)
+ LONG_S $20, PT_R20(sp)
+ LONG_S $21, PT_R21(sp)
+ LONG_S $22, PT_R22(sp)
+ LONG_S $23, PT_R23(sp)
+ LONG_S $28, PT_R28(sp)
+ LONG_S $30, PT_R30(sp)
+ LONG_S $31, PT_R31(sp)
+ /* A couple of CP0 registers with space in pt_regs */
+ mfc0 k0, CP0_STATUS
+ LONG_S k0, PT_STATUS(sp)
+.endm
+
+/* Restore CPU state from stack after resume from RAM */
+.macro RESUME_RESTORE_REGS_RETURN
+ .set push
+ .set noreorder
+ /* A couple of CP0 registers with space in pt_regs */
+ LONG_L k0, PT_STATUS(sp)
+ mtc0 k0, CP0_STATUS
+ /* Call preserved GPRs */
+ LONG_L $16, PT_R16(sp)
+ LONG_L $17, PT_R17(sp)
+ LONG_L $18, PT_R18(sp)
+ LONG_L $19, PT_R19(sp)
+ LONG_L $20, PT_R20(sp)
+ LONG_L $21, PT_R21(sp)
+ LONG_L $22, PT_R22(sp)
+ LONG_L $23, PT_R23(sp)
+ LONG_L $28, PT_R28(sp)
+ LONG_L $30, PT_R30(sp)
+ LONG_L $31, PT_R31(sp)
+ /* Pop and return */
+ jr ra
+ addiu sp, PT_SIZE
+ .set pop
+.endm
+
+/* Get address of static suspend state into t1 */
+.macro LA_STATIC_SUSPEND
+ la t1, mips_static_suspend_state
+.endm
+
+/* Save important CPU state for early restoration to global data */
+.macro SUSPEND_SAVE_STATIC
+#ifdef CONFIG_EVA
+ /*
+ * Segment configuration is saved in global data where it can be easily
+ * reloaded without depending on the segment configuration.
+ */
+ mfc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
+ LONG_S k0, SSS_SEGCTL0(t1)
+ mfc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
+ LONG_S k0, SSS_SEGCTL1(t1)
+ mfc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
+ LONG_S k0, SSS_SEGCTL2(t1)
+#endif
+ /* save stack pointer (pointing to GPRs) */
+ LONG_S sp, SSS_SP(t1)
+.endm
+
+/* Restore important CPU state early from global data */
+.macro RESUME_RESTORE_STATIC
+#ifdef CONFIG_EVA
+ /*
+ * Segment configuration must be restored prior to any access to
+ * allocated memory, as it may reside outside of the legacy kernel
+ * segments.
+ */
+ LONG_L k0, SSS_SEGCTL0(t1)
+ mtc0 k0, CP0_PAGEMASK, 2 /* SegCtl0 */
+ LONG_L k0, SSS_SEGCTL1(t1)
+ mtc0 k0, CP0_PAGEMASK, 3 /* SegCtl1 */
+ LONG_L k0, SSS_SEGCTL2(t1)
+ mtc0 k0, CP0_PAGEMASK, 4 /* SegCtl2 */
+ tlbw_use_hazard
+#endif
+ /* restore stack pointer (pointing to GPRs) */
+ LONG_L sp, SSS_SP(t1)
+.endm
+
+/* flush caches to make sure context has reached memory */
+.macro SUSPEND_CACHE_FLUSH
+ .extern __wback_cache_all
+ .set push
+ .set noreorder
+ la t1, __wback_cache_all
+ LONG_L t0, 0(t1)
+ jalr t0
+ nop
+ .set pop
+ .endm
+
+/* Save suspend state and flush data caches to RAM */
+.macro SUSPEND_SAVE
+ SUSPEND_SAVE_REGS
+ LA_STATIC_SUSPEND
+ SUSPEND_SAVE_STATIC
+ SUSPEND_CACHE_FLUSH
+.endm
+
+/* Restore saved state after resume from RAM and return */
+.macro RESUME_RESTORE_RETURN
+ LA_STATIC_SUSPEND
+ RESUME_RESTORE_STATIC
+ RESUME_RESTORE_REGS_RETURN
+.endm
+
+#else /* __ASSEMBLY__ */
+
+/**
+ * struct mips_static_suspend_state - Core saved CPU state across S2R.
+ * @segctl: CP0 Segment control registers.
+ * @sp: Stack frame where GP register context is saved.
+ *
+ * This structure contains minimal CPU state that must be saved in static kernel
+ * data in order to be able to restore the rest of the state. This includes
+ * segmentation configuration in the case of EVA being enabled, as they must be
+ * restored prior to any kmalloc'd memory being referenced (even the stack
+ * pointer).
+ */
+struct mips_static_suspend_state {
+#ifdef CONFIG_EVA
+ unsigned long segctl[3];
+#endif
+ unsigned long sp;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_PM_H */
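
The SSS_* (and PT_*) offsets consumed by these macros are expected to come from asm-offsets. A sketch of the matching generator block, assuming the usual kbuild OFFSET() machinery; the function name is illustrative:

#include <linux/kbuild.h>
#include <asm/pm.h>

void output_pm_defines(void)
{
	COMMENT(" PM offsets. ");
#ifdef CONFIG_EVA
	OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
	OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
	OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
#endif
	OFFSET(SSS_SP, mips_static_suspend_state, sp);
	BLANK();
}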
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index ccd2b75f152c..a9494c0141fb 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -21,13 +21,13 @@ extern void device_tree_init(void);
struct boot_param_header;
-extern void __dt_setup_arch(struct boot_param_header *bph);
+extern void __dt_setup_arch(void *bph);
#define dt_setup_arch(sym) \
({ \
- extern struct boot_param_header __dtb_##sym##_begin; \
+ extern char __dtb_##sym##_begin[]; \
\
- __dt_setup_arch(&__dtb_##sym##_begin); \
+ __dt_setup_arch(__dtb_##sym##_begin); \
})
#else /* CONFIG_OF */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index bf1ac8d35783..7e6e682aece3 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -39,9 +39,6 @@ struct pt_regs {
unsigned long cp0_badvaddr;
unsigned long cp0_cause;
unsigned long cp0_epc;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long cp0_tcstatus;
-#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
unsigned long long mpl[3]; /* MTM{0,1,2} */
unsigned long long mtp[3]; /* MTP{0,1,2} */
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index ca64cbe44493..0b8bd28a0df1 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -43,11 +43,10 @@
: "i" (op), "R" (*(unsigned char *)(addr)))
#ifdef CONFIG_MIPS_MT
+
/*
- * Temporary hacks for SMTC debug. Optionally force single-threaded
- * execution during I-cache flushes.
+ * Optionally force single-threaded execution during I-cache flushes.
*/
-
#define PROTECT_CACHE_FLUSHES 1
#ifdef PROTECT_CACHE_FLUSHES
@@ -524,6 +523,8 @@ __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32,
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
diff --git a/arch/mips/include/asm/sgi/ip22.h b/arch/mips/include/asm/sgi/ip22.h
index 8db1a3588cf2..87ec9eaa04e3 100644
--- a/arch/mips/include/asm/sgi/ip22.h
+++ b/arch/mips/include/asm/sgi/ip22.h
@@ -69,6 +69,8 @@
#define SGI_EISA_IRQ SGINT_LOCAL2 + 3 /* EISA interrupts */
#define SGI_KEYBD_IRQ SGINT_LOCAL2 + 4 /* keyboard */
#define SGI_SERIAL_IRQ SGINT_LOCAL2 + 5 /* onboard serial */
+#define SGI_GIOEXP0_IRQ (SGINT_LOCAL2 + 6) /* Indy GIO EXP0 */
+#define SGI_GIOEXP1_IRQ (SGINT_LOCAL2 + 7) /* Indy GIO EXP1 */
#define ip22_is_fullhouse() (sgioc->sysid & SGIOC_SYSID_FULLHOUSE)
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
index f54bdbe85c0d..eeeb0f48c767 100644
--- a/arch/mips/include/asm/sigcontext.h
+++ b/arch/mips/include/asm/sigcontext.h
@@ -32,8 +32,6 @@ struct sigcontext32 {
__u32 sc_lo2;
__u32 sc_hi3;
__u32 sc_lo3;
- __u64 sc_msaregs[32]; /* Most significant 64 bits */
- __u32 sc_msa_csr;
};
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
#endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
index d60d1a2180d1..a06a08a9afc6 100644
--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -13,17 +13,28 @@
#ifndef __ASSEMBLY__
-struct boot_config {
- unsigned int core;
- unsigned int vpe;
+struct vpe_boot_config {
unsigned long pc;
unsigned long sp;
unsigned long gp;
};
-extern struct boot_config mips_cps_bootcfg;
+struct core_boot_config {
+ atomic_t vpe_mask;
+ struct vpe_boot_config *vpe_config;
+};
+
+extern struct core_boot_config *mips_cps_core_bootcfg;
extern void mips_cps_core_entry(void);
+extern void mips_cps_core_init(void);
+
+extern struct vpe_boot_config *mips_cps_boot_vpes(void);
+
+extern bool mips_cps_smp_in_use(void);
+
+extern void mips_cps_pm_save(void);
+extern void mips_cps_pm_restore(void);
#else /* __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index 73d35b18fb64..6ba1fb8b11e2 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -26,7 +26,6 @@ struct plat_smp_ops {
void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
void (*init_secondary)(void);
void (*smp_finish)(void);
- void (*cpus_done)(void);
void (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index efa02acd3dd5..b037334fca22 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -46,6 +46,9 @@ extern int __cpu_logical_map[NR_CPUS];
extern volatile cpumask_t cpu_callin_map;
+/* Mask of CPUs which are currently definitely operating coherently */
+extern cpumask_t cpu_coherent_mask;
+
extern void asmlinkage smp_bootstrap(void);
/*
diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h
deleted file mode 100644
index e56b439b7871..000000000000
--- a/arch/mips/include/asm/smtc.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _ASM_SMTC_MT_H
-#define _ASM_SMTC_MT_H
-
-/*
- * Definitions for SMTC multitasking on MIPS MT cores
- */
-
-#include <asm/mips_mt.h>
-#include <asm/smtc_ipi.h>
-
-/*
- * System-wide SMTC status information
- */
-
-extern unsigned int smtc_status;
-
-#define SMTC_TLB_SHARED 0x00000001
-#define SMTC_MTC_ACTIVE 0x00000002
-
-/*
- * TLB/ASID Management information
- */
-
-#define MAX_SMTC_TLBS 2
-#define MAX_SMTC_ASIDS 256
-#if NR_CPUS <= 8
-typedef char asiduse;
-#else
-#if NR_CPUS <= 16
-typedef short asiduse;
-#else
-typedef long asiduse;
-#endif
-#endif
-
-/*
- * VPE Management information
- */
-
-#define MAX_SMTC_VPES MAX_SMTC_TLBS /* FIXME: May not always be true. */
-
-extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
-
-struct mm_struct;
-struct task_struct;
-
-void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-void self_ipi(struct smtc_ipi *);
-void smtc_flush_tlb_asid(unsigned long asid);
-extern int smtc_build_cpu_map(int startslot);
-extern void smtc_prepare_cpus(int cpus);
-extern void smtc_smp_finish(void);
-extern void smtc_boot_secondary(int cpu, struct task_struct *t);
-extern void smtc_cpus_done(void);
-extern void smtc_init_secondary(void);
-
-
-/*
- * Sharing the TLB between multiple VPEs means that the
- * "random" index selection function is not allowed to
- * select the current value of the Index register. To
- * avoid additional TLB pressure, the Index registers
- * are "parked" with an non-Valid value.
- */
-
-#define PARKED_INDEX ((unsigned int)0x80000000)
-
-/*
- * Define low-level interrupt mask for IPIs, if necessary.
- * By default, use SW interrupt 1, which requires no external
- * hardware support, but which works only for single-core
- * MIPS MT systems.
- */
-#ifndef MIPS_CPU_IPI_IRQ
-#define MIPS_CPU_IPI_IRQ 1
-#endif
-
-#endif /* _ASM_SMTC_MT_H */
diff --git a/arch/mips/include/asm/smtc_ipi.h b/arch/mips/include/asm/smtc_ipi.h
deleted file mode 100644
index 15278dbd7e79..000000000000
--- a/arch/mips/include/asm/smtc_ipi.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
- */
-#ifndef __ASM_SMTC_IPI_H
-#define __ASM_SMTC_IPI_H
-
-#include <linux/spinlock.h>
-
-//#define SMTC_IPI_DEBUG
-
-#ifdef SMTC_IPI_DEBUG
-#include <asm/mipsregs.h>
-#include <asm/mipsmtregs.h>
-#endif /* SMTC_IPI_DEBUG */
-
-/*
- * An IPI "message"
- */
-
-struct smtc_ipi {
- struct smtc_ipi *flink;
- int type;
- void *arg;
- int dest;
-#ifdef SMTC_IPI_DEBUG
- int sender;
- long stamp;
-#endif /* SMTC_IPI_DEBUG */
-};
-
-/*
- * Defined IPI Types
- */
-
-#define LINUX_SMP_IPI 1
-#define SMTC_CLOCK_TICK 2
-#define IRQ_AFFINITY_IPI 3
-
-/*
- * A queue of IPI messages
- */
-
-struct smtc_ipi_q {
- struct smtc_ipi *head;
- spinlock_t lock;
- struct smtc_ipi *tail;
- int depth;
- int resched_flag; /* reschedule already queued */
-};
-
-static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&q->lock, flags);
- if (q->head == NULL)
- q->head = q->tail = p;
- else
- q->tail->flink = p;
- p->flink = NULL;
- q->tail = p;
- q->depth++;
-#ifdef SMTC_IPI_DEBUG
- p->sender = read_c0_tcbind();
- p->stamp = read_c0_count();
-#endif /* SMTC_IPI_DEBUG */
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
-{
- struct smtc_ipi *p;
-
- if (q->head == NULL)
- p = NULL;
- else {
- p = q->head;
- q->head = q->head->flink;
- q->depth--;
- /* Arguably unnecessary, but leaves queue cleaner */
- if (q->head == NULL)
- q->tail = NULL;
- }
-
- return p;
-}
-
-static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
-{
- unsigned long flags;
- struct smtc_ipi *p;
-
- spin_lock_irqsave(&q->lock, flags);
- p = __smtc_ipi_dq(q);
- spin_unlock_irqrestore(&q->lock, flags);
-
- return p;
-}
-
-static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&q->lock, flags);
- if (q->head == NULL) {
- q->head = q->tail = p;
- p->flink = NULL;
- } else {
- p->flink = q->head;
- q->head = p;
- }
- q->depth++;
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
-{
- unsigned long flags;
- int retval;
-
- spin_lock_irqsave(&q->lock, flags);
- retval = q->depth;
- spin_unlock_irqrestore(&q->lock, flags);
- return retval;
-}
-
-extern void smtc_send_ipi(int cpu, int type, unsigned int action);
-
-#endif /* __ASM_SMTC_IPI_H */
diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
deleted file mode 100644
index 25da651f1f5f..000000000000
--- a/arch/mips/include/asm/smtc_proc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Definitions for SMTC /proc entries
- * Copyright(C) 2005 MIPS Technologies Inc.
- */
-#ifndef __ASM_SMTC_PROC_H
-#define __ASM_SMTC_PROC_H
-
-/*
- * per-"CPU" statistics
- */
-
-struct smtc_cpu_proc {
- unsigned long timerints;
- unsigned long selfipis;
-};
-
-extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
-
-/* Count of number of recoveries of "stolen" FPU access rights on 34K */
-
-extern atomic_t smtc_fpu_recoveries;
-
-#endif /* __ASM_SMTC_PROC_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index d301e108d5b8..b188c797565c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -19,22 +19,12 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
-/*
- * For SMTC kernel, global IE should be left set, and interrupts
- * controlled exclusively via IXMT.
- */
-#ifdef CONFIG_MIPS_MT_SMTC
-#define STATMASK 0x1e
-#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
-
.macro SAVE_AT
.set push
.set noat
@@ -186,16 +176,6 @@
mfc0 v1, CP0_STATUS
LONG_S $2, PT_R2(sp)
LONG_S v1, PT_STATUS(sp)
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * Ideally, these instructions would be shuffled in
- * to cover the pipeline delay.
- */
- .set mips32
- mfc0 k0, CP0_TCSTATUS
- .set mips0
- LONG_S k0, PT_TCSTATUS(sp)
-#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp)
mfc0 v1, CP0_CAUSE
LONG_S $5, PT_R5(sp)
@@ -321,36 +301,6 @@
.set push
.set reorder
.set noat
-#ifdef CONFIG_MIPS_MT_SMTC
- .set mips32r2
- /*
- * We need to make sure the read-modify-write
- * of Status below isn't perturbed by an interrupt
- * or cross-TC access, so we need to do at least a DMT,
- * protected by an interrupt-inhibit. But setting IXMT
- * also creates a few-cycle window where an IPI could
- * be queued and not be detected before potentially
- * returning to a WAIT or user-mode loop. It must be
- * replayed.
- *
- * We're in the middle of a context switch, and
- * we can't dispatch it directly without trashing
- * some registers, so we'll try to detect this unlikely
- * case and program a software interrupt in the VPE,
- * as would be done for a cross-VPE IPI. To accommodate
- * the handling of that case, we're doing a DVPE instead
- * of just a DMT here to protect against other threads.
- * This is a lot of cruft to cover a tiny window.
- * If you can find a better design, implement it!
- *
- */
- mfc0 v0, CP0_TCSTATUS
- ori v0, TCSTATUS_IXMT
- mtc0 v0, CP0_TCSTATUS
- _ehb
- DVPE 5 # dvpe a1
- jal mips_ihb
-#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 a0, CP0_STATUS
ori a0, STATMASK
xori a0, STATMASK
@@ -362,59 +312,6 @@
and v0, v1
or v0, a0
mtc0 v0, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * Only after EXL/ERL have been restored to status can we
- * restore TCStatus.IXMT.
- */
- LONG_L v1, PT_TCSTATUS(sp)
- _ehb
- mfc0 a0, CP0_TCSTATUS
- andi v1, TCSTATUS_IXMT
- bnez v1, 0f
-
-/*
- * We'd like to detect any IPIs queued in the tiny window
- * above and request an software interrupt to service them
- * when we ERET.
- *
- * Computing the offset into the IPIQ array of the executing
- * TC's IPI queue in-line would be tedious. We use part of
- * the TCContext register to hold 16 bits of offset that we
- * can add in-line to find the queue head.
- */
- mfc0 v0, CP0_TCCONTEXT
- la a2, IPIQ
- srl v0, v0, 16
- addu a2, a2, v0
- LONG_L v0, 0(a2)
- beqz v0, 0f
-/*
- * If we have a queue, provoke dispatch within the VPE by setting C_SW1
- */
- mfc0 v0, CP0_CAUSE
- ori v0, v0, C_SW1
- mtc0 v0, CP0_CAUSE
-0:
- /*
- * This test should really never branch but
- * let's be prudent here. Having atomized
- * the shared register modifications, we can
- * now EVPE, and must do so before interrupts
- * are potentially re-enabled.
- */
- andi a1, a1, MVPCONTROL_EVP
- beqz a1, 1f
- evpe
-1:
- /* We know that TCStatua.IXMT should be set from above */
- xori a0, a0, TCSTATUS_IXMT
- or a0, a0, v1
- mtc0 a0, CP0_TCSTATUS
- _ehb
-
- .set mips0
-#endif /* CONFIG_MIPS_MT_SMTC */
LONG_L v1, PT_EPC(sp)
MTC0 v1, CP0_EPC
LONG_L $31, PT_R31(sp)
@@ -467,33 +364,11 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro CLI
-#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | STATMASK
or t0, t1
xori t0, STATMASK
mtc0 t0, CP0_STATUS
-#else /* CONFIG_MIPS_MT_SMTC */
- /*
- * For SMTC, we need to set privilege
- * and disable interrupts only for the
- * current TC, using the TCStatus register.
- */
- mfc0 t0, CP0_TCSTATUS
- /* Fortunately CU 0 is in the same place in both registers */
- /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
- li t1, ST0_CU0 | 0x08001c00
- or t0, t1
- /* Clear TKSU, leave IXMT */
- xori t0, 0x00001800
- mtc0 t0, CP0_TCSTATUS
- _ehb
- /* We need to leave the global IE bit set, but clear EXL...*/
- mfc0 t0, CP0_STATUS
- ori t0, ST0_EXL | ST0_ERL
- xori t0, ST0_EXL | ST0_ERL
- mtc0 t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
irq_disable_hazard
.endm
@@ -502,35 +377,11 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro STI
-#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | STATMASK
or t0, t1
xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS
-#else /* CONFIG_MIPS_MT_SMTC */
- /*
- * For SMTC, we need to set privilege
- * and enable interrupts only for the
- * current TC, using the TCStatus register.
- */
- _ehb
- mfc0 t0, CP0_TCSTATUS
- /* Fortunately CU 0 is in the same place in both registers */
- /* Set TCU0, TKSU (for later inversion) and IXMT */
- li t1, ST0_CU0 | 0x08001c00
- or t0, t1
- /* Clear TKSU *and* IXMT */
- xori t0, 0x00001c00
- mtc0 t0, CP0_TCSTATUS
- _ehb
- /* We need to leave the global IE bit set, but clear EXL...*/
- mfc0 t0, CP0_STATUS
- ori t0, ST0_EXL
- xori t0, ST0_EXL
- mtc0 t0, CP0_STATUS
- /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
-#endif /* CONFIG_MIPS_MT_SMTC */
irq_enable_hazard
.endm
@@ -540,32 +391,6 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro KMODE
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * This gets baroque in SMTC. We want to
- * protect the non-atomic clearing of EXL
- * with DMT/EMT, but we don't want to take
- * an interrupt while DMT is still in effect.
- */
-
- /* KMODE gets invoked from both reorder and noreorder code */
- .set push
- .set mips32r2
- .set noreorder
- mfc0 v0, CP0_TCSTATUS
- andi v1, v0, TCSTATUS_IXMT
- ori v0, TCSTATUS_IXMT
- mtc0 v0, CP0_TCSTATUS
- _ehb
- DMT 2 # dmt v0
- /*
- * We don't know a priori if ra is "live"
- */
- move t0, ra
- jal mips_ihb
- nop /* delay slot */
- move ra, t0
-#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -576,25 +401,6 @@
or t0, t1
xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
- _ehb
- andi v0, v0, VPECONTROL_TE
- beqz v0, 2f
- nop /* delay slot */
- emt
-2:
- mfc0 v0, CP0_TCSTATUS
- /* Clear IXMT, then OR in previous value */
- ori v0, TCSTATUS_IXMT
- xori v0, TCSTATUS_IXMT
- or v0, v1, v0
- mtc0 v0, CP0_TCSTATUS
- /*
- * irq_disable_hazard below should expand to EHB
- * on 24K/34K CPUS
- */
- .set pop
-#endif /* CONFIG_MIPS_MT_SMTC */
irq_disable_hazard
.endm
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index d2d961d6cb86..7de865805deb 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -159,11 +159,7 @@ static inline struct thread_info *current_thread_info(void)
* We stash processor id into a COP0 register to retrieve it fast
* at kernel exception entry.
*/
-#if defined(CONFIG_MIPS_MT_SMTC)
-#define SMP_CPUID_REG 2, 2 /* TCBIND */
-#define ASM_SMP_CPUID_REG $2, 2
-#define SMP_CPUID_PTRSHIFT 19
-#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define SMP_CPUID_REG 20, 0 /* XCONTEXT */
#define ASM_SMP_CPUID_REG $20
#define SMP_CPUID_PTRSHIFT 48
@@ -179,13 +175,8 @@ static inline struct thread_info *current_thread_info(void)
#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2)
#endif
-#ifdef CONFIG_MIPS_MT_SMTC
-#define ASM_CPUID_MFC0 mfc0
-#define UASM_i_CPUID_MFC0 uasm_i_mfc0
-#else
#define ASM_CPUID_MFC0 MFC0
#define UASM_i_CPUID_MFC0 UASM_i_MFC0
-#endif
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 24f534a7fbc3..8f3047d611ee 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -52,14 +52,11 @@ extern int (*perf_irq)(void);
*/
extern unsigned int __weak get_c0_compare_int(void);
extern int r4k_clockevent_init(void);
-extern int smtc_clockevent_init(void);
extern int gic_clockevent_init(void);
static inline int mips_clockevent_init(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- return smtc_clockevent_init();
-#elif defined(CONFIG_CEVT_GIC)
+#if defined(CONFIG_CEVT_GIC)
return (gic_clockevent_init() | r4k_clockevent_init());
#elif defined(CONFIG_CEVT_R4K)
return r4k_clockevent_init();
diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
index c5424757da65..b05bb70a2e46 100644
--- a/arch/mips/include/asm/timex.h
+++ b/arch/mips/include/asm/timex.h
@@ -4,12 +4,16 @@
* for more details.
*
* Copyright (C) 1998, 1999, 2003 by Ralf Baechle
+ * Copyright (C) 2014 by Maciej W. Rozycki
*/
#ifndef _ASM_TIMEX_H
#define _ASM_TIMEX_H
#ifdef __KERNEL__
+#include <linux/compiler.h>
+
+#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
#include <asm/cpu-type.h>
@@ -45,29 +49,54 @@ typedef unsigned int cycles_t;
 * However for now the implementation of this function doesn't get these
* fine details right.
*/
-static inline cycles_t get_cycles(void)
+static inline int can_use_mips_counter(unsigned int prid)
{
- switch (boot_cpu_type()) {
- case CPU_R4400PC:
- case CPU_R4400SC:
- case CPU_R4400MC:
- if ((read_c0_prid() & 0xff) >= 0x0050)
- return read_c0_count();
- break;
+ int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY;
- case CPU_R4000PC:
- case CPU_R4000SC:
- case CPU_R4000MC:
- break;
+ if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter)
+ return 0;
+ else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r)
+ return 1;
+ else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp))
+ return 1;
+ /* Make sure we don't peek at cpu_data[0].options in the fast path! */
+ if (!__builtin_constant_p(cpu_has_counter))
+ asm volatile("" : "=m" (cpu_data[0].options));
+ if (likely(cpu_has_counter &&
+ prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
+ return 1;
+ else
+ return 0;
+}
- default:
- if (cpu_has_counter)
- return read_c0_count();
- break;
- }
+static inline cycles_t get_cycles(void)
+{
+ if (can_use_mips_counter(read_c0_prid()))
+ return read_c0_count();
+ else
+ return 0; /* no usable counter */
+}
+
+/*
+ * Like get_cycles - but where c0_count is not available we desperately
+ * use c0_random in an attempt to get at least a little bit of entropy.
+ *
+ * R6000 and R6000A have neither a count register nor a random register.
+ * That leaves no entropy source in the CPU itself.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+ unsigned int prid = read_c0_prid();
+ unsigned int imp = prid & PRID_IMP_MASK;
- return 0; /* no usable counter */
+ if (can_use_mips_counter(prid))
+ return read_c0_count();
+ else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
+ return read_c0_random();
+ else
+ return 0; /* no usable register */
}
+#define random_get_entropy random_get_entropy
#endif /* __KERNEL__ */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index c33a9564fb41..708c5d414905 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -55,6 +55,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+#define Ip_u3u2u1(op) \
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+
#define Ip_u3u1u2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
@@ -64,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
+#define Ip_s3s1s2(op) \
+void ISAOPC(op)(u32 **buf, int a, int b, int c)
+
#define Ip_u2u1s3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
@@ -74,6 +80,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
#define Ip_u1u2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+#define Ip_u2u1(op) \
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+
#define Ip_u1s2(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
@@ -99,6 +108,7 @@ Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm);
+Ip_u1u2(_divu);
Ip_u1u2u3(_dmfc0);
Ip_u1u2u3(_dmtc0);
Ip_u2u1u3(_drotr);
@@ -114,16 +124,22 @@ Ip_u2u1msbu3(_ext);
Ip_u2u1msbu3(_ins);
Ip_u1(_j);
Ip_u1(_jal);
+Ip_u2u1(_jalr);
Ip_u1(_jr);
+Ip_u2s3u1(_lb);
Ip_u2s3u1(_ld);
Ip_u3u1u2(_ldx);
+Ip_u2s3u1(_lh);
Ip_u2s3u1(_ll);
Ip_u2s3u1(_lld);
Ip_u1s2(_lui);
Ip_u2s3u1(_lw);
Ip_u3u1u2(_lwx);
Ip_u1u2u3(_mfc0);
+Ip_u1(_mfhi);
+Ip_u1(_mflo);
Ip_u1u2u3(_mtc0);
+Ip_u3u1u2(_mul);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
Ip_u2s3u1(_pref);
@@ -133,17 +149,26 @@ Ip_u2s3u1(_sc);
Ip_u2s3u1(_scd);
Ip_u2s3u1(_sd);
Ip_u2u1u3(_sll);
+Ip_u3u2u1(_sllv);
+Ip_s3s1s2(_slt);
+Ip_u2u1s3(_sltiu);
+Ip_u3u1u2(_sltu);
Ip_u2u1u3(_sra);
Ip_u2u1u3(_srl);
+Ip_u3u2u1(_srlv);
Ip_u3u1u2(_subu);
Ip_u2s3u1(_sw);
+Ip_u1(_sync);
Ip_u1(_syscall);
Ip_0(_tlbp);
Ip_0(_tlbr);
Ip_0(_tlbwi);
Ip_0(_tlbwr);
+Ip_u1(_wait);
+Ip_u2u1(_wsbh);
Ip_u3u1u2(_xor);
Ip_u2u1u3(_xori);
+Ip_u2u1(_yield);
/* Handle labels. */
@@ -264,6 +289,8 @@ void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid);
void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid);
+void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
+ unsigned int r2, int lid);
void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 413d6c612bec..e55813029d5a 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -29,7 +29,6 @@
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index be7196eacb88..96fe7395ed8d 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -4,6 +4,7 @@ include include/uapi/asm-generic/Kbuild.asm
generic-y += auxvec.h
generic-y += ipcbuf.h
+header-y += bitfield.h
header-y += bitsperlong.h
header-y += break.h
header-y += byteorder.h
diff --git a/arch/mips/include/uapi/asm/bitfield.h b/arch/mips/include/uapi/asm/bitfield.h
new file mode 100644
index 000000000000..ad9861359cea
--- /dev/null
+++ b/arch/mips/include/uapi/asm/bitfield.h
@@ -0,0 +1,29 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __UAPI_ASM_BITFIELD_H
+#define __UAPI_ASM_BITFIELD_H
+
+/*
+ * Damn ... bitfields depend from byteorder :-(
+ */
+#ifdef __MIPSEB__
+#define __BITFIELD_FIELD(field, more) \
+ field; \
+ more
+
+#elif defined(__MIPSEL__)
+
+#define __BITFIELD_FIELD(field, more) \
+ more \
+ field;
+
+#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
+#endif
+
+#endif /* __UAPI_ASM_BITFIELD_H */
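
Exporting __BITFIELD_FIELD through uapi lets user space reuse the endian-agnostic instruction layouts. The nesting convention mirrors the existing formats in inst.h; a small example with illustrative field names:

#include <asm/bitfield.h>

/*
 * Fields are declared most-significant first; on little-endian the macro
 * reverses the order so the struct matches the instruction word layout on
 * both byte orders.
 */
struct example_i_format {
	__BITFIELD_FIELD(unsigned int opcode : 6,
	__BITFIELD_FIELD(unsigned int rs : 5,
	__BITFIELD_FIELD(unsigned int rt : 5,
	__BITFIELD_FIELD(signed int simmediate : 16,
	;))))
};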
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 3125797f2a88..4bfdb9d4c186 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -13,6 +13,8 @@
#ifndef _UAPI_ASM_INST_H
#define _UAPI_ASM_INST_H
+#include <asm/bitfield.h>
+
/*
* Major opcodes; before MIPS IV cop1x was called cop3.
*/
@@ -74,16 +76,17 @@ enum spec2_op {
enum spec3_op {
ext_op, dextm_op, dextu_op, dext_op,
ins_op, dinsm_op, dinsu_op, dins_op,
- lx_op = 0x0a, lwle_op = 0x19,
- lwre_op = 0x1a, cachee_op = 0x1b,
- sbe_op = 0x1c, she_op = 0x1d,
- sce_op = 0x1e, swe_op = 0x1f,
- bshfl_op = 0x20, swle_op = 0x21,
- swre_op = 0x22, prefe_op = 0x23,
- dbshfl_op = 0x24, lbue_op = 0x28,
- lhue_op = 0x29, lbe_op = 0x2c,
- lhe_op = 0x2d, lle_op = 0x2e,
- lwe_op = 0x2f, rdhwr_op = 0x3b
+ yield_op = 0x09, lx_op = 0x0a,
+ lwle_op = 0x19, lwre_op = 0x1a,
+ cachee_op = 0x1b, sbe_op = 0x1c,
+ she_op = 0x1d, sce_op = 0x1e,
+ swe_op = 0x1f, bshfl_op = 0x20,
+ swle_op = 0x21, swre_op = 0x22,
+ prefe_op = 0x23, dbshfl_op = 0x24,
+ lbue_op = 0x28, lhue_op = 0x29,
+ lbe_op = 0x2c, lhe_op = 0x2d,
+ lle_op = 0x2e, lwe_op = 0x2f,
+ rdhwr_op = 0x3b
};
/*
@@ -125,7 +128,8 @@ enum bcop_op {
enum cop0_coi_func {
tlbr_op = 0x01, tlbwi_op = 0x02,
tlbwr_op = 0x06, tlbp_op = 0x08,
- rfe_op = 0x10, eret_op = 0x18
+ rfe_op = 0x10, eret_op = 0x18,
+ wait_op = 0x20,
};
/*
@@ -202,6 +206,16 @@ enum lx_func {
};
/*
+ * BSHFL opcodes
+ */
+enum bshfl_func {
+ wsbh_op = 0x2,
+ dshd_op = 0x5,
+ seb_op = 0x10,
+ seh_op = 0x18,
+};
+
+/*
* (microMIPS) Major opcodes.
*/
enum mm_major_op {
@@ -244,17 +258,23 @@ enum mm_32i_minor_op {
enum mm_32a_minor_op {
mm_sll32_op = 0x000,
mm_ins_op = 0x00c,
+ mm_sllv32_op = 0x010,
mm_ext_op = 0x02c,
mm_pool32axf_op = 0x03c,
mm_srl32_op = 0x040,
mm_sra_op = 0x080,
+ mm_srlv32_op = 0x090,
mm_rotr_op = 0x0c0,
mm_lwxs_op = 0x118,
mm_addu32_op = 0x150,
mm_subu32_op = 0x1d0,
+ mm_wsbh_op = 0x1ec,
+ mm_mul_op = 0x210,
mm_and_op = 0x250,
mm_or32_op = 0x290,
mm_xor32_op = 0x310,
+ mm_slt_op = 0x350,
+ mm_sltu_op = 0x390,
};
/*
@@ -294,15 +314,20 @@ enum mm_32axf_minor_op {
mm_mfc0_op = 0x003,
mm_mtc0_op = 0x00b,
mm_tlbp_op = 0x00d,
+ mm_mfhi32_op = 0x035,
mm_jalr_op = 0x03c,
mm_tlbr_op = 0x04d,
+ mm_mflo32_op = 0x075,
mm_jalrhb_op = 0x07c,
mm_tlbwi_op = 0x08d,
mm_tlbwr_op = 0x0cd,
mm_jalrs_op = 0x13c,
mm_jalrshb_op = 0x17c,
+ mm_sync_op = 0x1ad,
mm_syscall_op = 0x22d,
+ mm_wait_op = 0x24d,
mm_eret_op = 0x3cd,
+ mm_divu_op = 0x5dc,
};
/*
@@ -480,24 +505,6 @@ enum MIPS6e_i8_func {
*/
#define MM_NOP16 0x0c00
-/*
- * Damn ... bitfields depend from byteorder :-(
- */
-#ifdef __MIPSEB__
-#define __BITFIELD_FIELD(field, more) \
- field; \
- more
-
-#elif defined(__MIPSEL__)
-
-#define __BITFIELD_FIELD(field, more) \
- more \
- field;
-
-#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
-#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
-#endif
-
struct j_format {
__BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
__BITFIELD_FIELD(unsigned int target : 26,
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index f09ff5ae2059..2c04b6d9ff85 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -106,6 +106,41 @@ struct kvm_fpu {
#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
+/* KVM specific control registers */
+
+/*
+ * CP0_Count control
+ * DC: Set 0: Master disable CP0_Count and set COUNT_RESUME to now
+ * Set 1: Master re-enable CP0_Count with unchanged bias, handling timer
+ * interrupts since COUNT_RESUME
+ * This can be used to freeze the timer to get a consistent snapshot of
+ * the CP0_Count and timer interrupt pending state, while also resuming
+ * safely without losing time or guest timer interrupts.
+ * Other: Reserved, do not change.
+ */
+#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
+ 0x20000 | 0)
+#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001
+
+/*
+ * CP0_Count resume monotonic nanoseconds
+ * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
+ * disable). Any reads and writes of Count related registers while
+ * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
+ * cleared again (master enable) any timer interrupts since this time will be
+ * emulated.
+ * Modifications to times in the future are rejected.
+ */
+#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
+ 0x20000 | 1)
+/*
+ * CP0_Count rate in Hz
+ * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
+ * discontinuities in CP0_Count.
+ */
+#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
+ 0x20000 | 2)
+
/*
* KVM MIPS specific structures and definitions
*
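A hedged userspace sketch of how these ids are meant to be driven through KVM's generic one-reg interface. KVM_SET_ONE_REG/KVM_GET_ONE_REG and struct kvm_one_reg are the existing KVM ioctls; the KVM_REG_MIPS_COUNT_* ids are the ones added above, so a <linux/kvm.h> containing this patch is assumed. The helper names (set_one_reg, get_one_reg, freeze_and_resume_count) and the vcpu_fd parameter are illustrative only.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Write one 64-bit KVM register on the given vcpu fd. */
static int set_one_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)&val };

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Read one 64-bit KVM register on the given vcpu fd. */
static int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Freeze CP0_Count for a consistent snapshot, then resume without losing time. */
static int freeze_and_resume_count(int vcpu_fd, uint64_t *resume_ns)
{
	int err;

	/* Master disable: freeze CP0_Count and latch COUNT_RESUME to now. */
	err = set_one_reg(vcpu_fd, KVM_REG_MIPS_COUNT_CTL,
			  KVM_REG_MIPS_COUNT_CTL_DC);
	if (err)
		return err;

	/* While frozen, Count-related accesses appear to occur at this time. */
	err = get_one_reg(vcpu_fd, KVM_REG_MIPS_COUNT_RESUME, resume_ns);
	if (err)
		return err;

	/* Master re-enable: timer interrupts since COUNT_RESUME get emulated. */
	return set_one_reg(vcpu_fd, KVM_REG_MIPS_COUNT_CTL, 0);
}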
diff --git a/arch/mips/include/uapi/asm/kvm_para.h b/arch/mips/include/uapi/asm/kvm_para.h
index 14fab8f0b957..7e16d7c42e65 100644
--- a/arch/mips/include/uapi/asm/kvm_para.h
+++ b/arch/mips/include/uapi/asm/kvm_para.h
@@ -1 +1,5 @@
-#include <asm-generic/kvm_para.h>
+#ifndef _UAPI_ASM_MIPS_KVM_PARA_H
+#define _UAPI_ASM_MIPS_KVM_PARA_H
+
+
+#endif /* _UAPI_ASM_MIPS_KVM_PARA_H */
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 681c17603a48..6c9906f59c6e 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,10 +12,6 @@
#include <linux/types.h>
#include <asm/sgidefs.h>
-/* Bits which may be set in sc_used_math */
-#define USEDMATH_FP (1 << 0)
-#define USEDMATH_MSA (1 << 1)
-
#if _MIPS_SIM == _MIPS_SIM_ABI32
/*
@@ -41,8 +37,6 @@ struct sigcontext {
unsigned long sc_lo2;
unsigned long sc_hi3;
unsigned long sc_lo3;
- unsigned long long sc_msaregs[32]; /* Most significant 64 bits */
- unsigned long sc_msa_csr;
};
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -76,8 +70,6 @@ struct sigcontext {
__u32 sc_used_math;
__u32 sc_dsp;
__u32 sc_reserved;
- __u64 sc_msaregs[32];
- __u32 sc_msa_csr;
};
diff --git a/arch/mips/include/uapi/asm/types.h b/arch/mips/include/uapi/asm/types.h
index 7ac9d0baad84..f3dd9ff0cc0c 100644
--- a/arch/mips/include/uapi/asm/types.h
+++ b/arch/mips/include/uapi/asm/types.h
@@ -14,9 +14,12 @@
/*
* We don't use int-l64.h for the kernel anymore but still use it for
* userspace to avoid code changes.
+ *
+ * However, some user programs (e.g. perf) may not want this. They can
+ * define __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
*/
#ifndef __KERNEL__
-# if _MIPS_SZLONG == 64
+# if _MIPS_SZLONG == 64 && !defined(__SANE_USERSPACE_TYPES__)
# include <asm-generic/int-l64.h>
# else
# include <asm-generic/int-ll64.h>
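A hedged illustration of the __SANE_USERSPACE_TYPES__ escape hatch, as a hypothetical standalone program built against installed MIPS uapi headers. Without the define, a 64-bit MIPS ABI would pick up int-l64.h, __u64 would be unsigned long, and the %llu format below would no longer match.

/* Request int-ll64.h types before any kernel uapi header is pulled in. */
#define __SANE_USERSPACE_TYPES__

#include <asm/types.h>
#include <stdio.h>

int main(void)
{
	__u64 bytes = 1ULL << 32;

	/* With the define, __u64 is unsigned long long, so %llu is correct. */
	printf("%llu\n", bytes);
	return 0;
}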