Diffstat (limited to 'arch/mips/kernel')
66 files changed, 5220 insertions, 4067 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 1c1b71752c84..008a2fed0584 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -17,7 +17,6 @@ endif

 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
-obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
 obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
 obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
@@ -30,6 +29,7 @@ obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
+obj-$(CONFIG_DEBUG_FS) += segment.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
 obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o

@@ -41,7 +41,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
 obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
 obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o

 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_SMP_UP) += smp-up.o
@@ -49,13 +49,18 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o

 obj-$(CONFIG_MIPS_MT) += mips-mt.o
 obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
-obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
 obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
+obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
 obj-$(CONFIG_CPU_MIPSR2) += spram.o

 obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
+obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
+obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
 obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o

 obj-$(CONFIG_I8259) += i8259.o
 obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
@@ -97,6 +102,12 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o

 obj-$(CONFIG_JUMP_LABEL) += jump_label.o

+obj-$(CONFIG_MIPS_CM) += mips-cm.o
+obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
+
+obj-$(CONFIG_CPU_PM) += pm.o
+obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
+
 #
 # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only.
It is not # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 0c2e853c3db4..4bb5107511e2 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -14,8 +14,10 @@ #include <linux/mm.h> #include <linux/kbuild.h> #include <linux/suspend.h> +#include <asm/pm.h> #include <asm/ptrace.h> #include <asm/processor.h> +#include <asm/smp-cps.h> #include <linux/kvm_host.h> @@ -63,9 +65,6 @@ void output_ptreg_defines(void) OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); OFFSET(PT_STATUS, pt_regs, cp0_status); OFFSET(PT_CAUSE, pt_regs, cp0_cause); -#ifdef CONFIG_MIPS_MT_SMTC - OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); -#endif /* CONFIG_MIPS_MT_SMTC */ #ifdef CONFIG_CPU_CAVIUM_OCTEON OFFSET(PT_MPL, pt_regs, mpl); OFFSET(PT_MTP, pt_regs, mtp); @@ -168,6 +167,72 @@ void output_thread_fpu_defines(void) OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); + /* the least significant 64 bits of each FP register */ + OFFSET(THREAD_FPR0_LS64, task_struct, + thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR1_LS64, task_struct, + thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR2_LS64, task_struct, + thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR3_LS64, task_struct, + thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR4_LS64, task_struct, + thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR5_LS64, task_struct, + thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR6_LS64, task_struct, + thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR7_LS64, task_struct, + thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR8_LS64, task_struct, + thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR9_LS64, task_struct, + thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR10_LS64, task_struct, + thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR11_LS64, task_struct, + thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR12_LS64, task_struct, + thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR13_LS64, task_struct, + thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR14_LS64, task_struct, + thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR15_LS64, task_struct, + thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR16_LS64, task_struct, + thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR17_LS64, task_struct, + thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR18_LS64, task_struct, + thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR19_LS64, task_struct, + thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR20_LS64, task_struct, + thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR21_LS64, task_struct, + thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR22_LS64, task_struct, + thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR23_LS64, task_struct, + thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR24_LS64, task_struct, + thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR25_LS64, task_struct, + thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR26_LS64, task_struct, + thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR27_LS64, task_struct, + thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR28_LS64, task_struct, + thread.fpu.fpr[28].val64[FPR_IDX(64, 
0)]); + OFFSET(THREAD_FPR29_LS64, task_struct, + thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR30_LS64, task_struct, + thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FPR31_LS64, task_struct, + thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]); + OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); BLANK(); } @@ -334,6 +399,20 @@ void output_pbe_defines(void) } #endif +#ifdef CONFIG_CPU_PM +void output_pm_defines(void) +{ + COMMENT(" PM offsets. "); +#ifdef CONFIG_EVA + OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]); + OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]); + OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]); +#endif + OFFSET(SSS_SP, mips_static_suspend_state, sp); + BLANK(); +} +#endif + void output_kvm_defines(void) { COMMENT(" KVM/MIPS Specfic offsets. "); @@ -397,3 +476,19 @@ void output_kvm_defines(void) OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]); BLANK(); } + +#ifdef CONFIG_MIPS_CPS +void output_cps_defines(void) +{ + COMMENT(" MIPS CPS offsets. "); + + OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask); + OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config); + DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config)); + + OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc); + OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp); + OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp); + DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config)); +} +#endif diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 202e581e6096..7faf5f2bee25 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -28,6 +28,18 @@ typedef double elf_fpreg_t; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; /* + * In order to be sure that we don't attempt to execute an O32 binary which + * requires 64 bit FP (FR=1) on a system which does not support it we refuse + * to execute any binary which has bits specified by the following macro set + * in its ELF header flags. + */ +#ifdef CONFIG_MIPS_O32_FP64_SUPPORT +# define __MIPS_O32_FP64_MUST_BE_ZERO 0 +#else +# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64 +#endif + +/* * This is used to ensure we don't load something for the wrong architecture. 
*/ #define elf_check_arch(hdr) \ @@ -44,6 +56,8 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; if (((__h->e_flags & EF_MIPS_ABI) != 0) && \ ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \ __res = 0; \ + if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \ + __res = 0; \ \ __res; \ }) diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S index bd79c4f9bff4..290c23b51678 100644 --- a/arch/mips/kernel/bmips_vec.S +++ b/arch/mips/kernel/bmips_vec.S @@ -8,11 +8,11 @@ * Reset/NMI/re-entry vectors for BMIPS processors */ -#include <linux/init.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/cacheops.h> +#include <asm/cpu.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> @@ -91,12 +91,18 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) beqz k0, bmips_smp_entry #if defined(CONFIG_CPU_BMIPS5000) + mfc0 k0, CP0_PRID + li k1, PRID_IMP_BMIPS5000 + andi k0, 0xff00 + bne k0, k1, 1f + /* if we're not on core 0, this must be the SMP boot signal */ li k1, (3 << 25) mfc0 k0, $22 and k0, k1 bnez k0, bmips_smp_entry -#endif +1: +#endif /* CONFIG_CPU_BMIPS5000 */ #endif /* CONFIG_SMP */ /* nope, it's just a regular NMI */ @@ -116,7 +122,7 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) jr k0 RESTORE_ALL - .set mips3 + .set arch=r4000 eret /*********************************************************************** @@ -139,7 +145,12 @@ bmips_smp_entry: xori k0, 0x04 mtc0 k0, CP0_CONFIG + mfc0 k0, CP0_PRID + andi k0, 0xff00 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) + li k1, PRID_IMP_BMIPS43XX + bne k0, k1, 2f + /* initialize CPU1's local I-cache */ li k0, 0x80000000 li k1, 0x80010000 @@ -150,14 +161,21 @@ bmips_smp_entry: 1: cache Index_Store_Tag_I, 0(k0) addiu k0, 16 bne k0, k1, 1b -#elif defined(CONFIG_CPU_BMIPS5000) + + b 3f +2: +#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */ +#if defined(CONFIG_CPU_BMIPS5000) /* set exception vector base */ + li k1, PRID_IMP_BMIPS5000 + bne k0, k1, 3f + la k0, ebase lw k0, 0(k0) mtc0 k0, $15, 1 BARRIER -#endif - +#endif /* CONFIG_CPU_BMIPS5000 */ +3: /* jump back to kseg0 in case we need to remap the kseg1 area */ la k0, 1f jr k0 @@ -221,8 +239,18 @@ END(bmips_smp_int_vec) LEAF(bmips_enable_xks01) #if defined(CONFIG_XKS01) - + mfc0 t0, CP0_PRID + andi t2, t0, 0xff00 #if defined(CONFIG_CPU_BMIPS4380) + li t1, PRID_IMP_BMIPS43XX + bne t2, t1, 1f + + andi t0, 0xff + addiu t1, t0, -PRID_REV_BMIPS4380_HI + bgtz t1, 2f + addiu t0, -PRID_REV_BMIPS4380_LO + bltz t0, 2f + mfc0 t0, $22, 3 li t1, 0x1ff0 li t2, (1 << 12) | (1 << 9) @@ -231,7 +259,13 @@ LEAF(bmips_enable_xks01) or t0, t2 mtc0 t0, $22, 3 BARRIER -#elif defined(CONFIG_CPU_BMIPS5000) + b 2f +1: +#endif /* CONFIG_CPU_BMIPS4380 */ +#if defined(CONFIG_CPU_BMIPS5000) + li t1, PRID_IMP_BMIPS5000 + bne t2, t1, 2f + mfc0 t0, $22, 5 li t1, 0x01ff li t2, (1 << 8) | (1 << 5) @@ -240,12 +274,8 @@ LEAF(bmips_enable_xks01) or t0, t2 mtc0 t0, $22, 5 BARRIER -#else - -#error Missing XKS01 setup - -#endif - +#endif /* CONFIG_CPU_BMIPS5000 */ +2: #endif /* defined(CONFIG_XKS01) */ jr ra diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 4d78bf445a9c..7b2df224f041 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -48,6 +48,202 @@ int __isa_exception_epc(struct pt_regs *regs) return epc; } +/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. 
*/ +static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7}; + +int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, + unsigned long *contpc) +{ + union mips_instruction insn = (union mips_instruction)dec_insn.insn; + int bc_false = 0; + unsigned int fcr31; + unsigned int bit; + + if (!cpu_has_mmips) + return 0; + + switch (insn.mm_i_format.opcode) { + case mm_pool32a_op: + if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == + mm_pool32axf_op) { + switch (insn.mm_i_format.simmediate >> + MM_POOL32A_MINOR_SHIFT) { + case mm_jalr_op: + case mm_jalrhb_op: + case mm_jalrs_op: + case mm_jalrshb_op: + if (insn.mm_i_format.rt != 0) /* Not mm_jr */ + regs->regs[insn.mm_i_format.rt] = + regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + *contpc = regs->regs[insn.mm_i_format.rs]; + return 1; + } + } + break; + case mm_pool32i_op: + switch (insn.mm_i_format.rt) { + case mm_bltzals_op: + case mm_bltzal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case mm_bltz_op: + if ((long)regs->regs[insn.mm_i_format.rs] < 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_bgezals_op: + case mm_bgezal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case mm_bgez_op: + if ((long)regs->regs[insn.mm_i_format.rs] >= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_blez_op: + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_bgtz_op: + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_bc2f_op: + case mm_bc1f_op: + bc_false = 1; + /* Fall through */ + case mm_bc2t_op: + case mm_bc1t_op: + preempt_disable(); + if (is_fpu_owner()) + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + if (bc_false) + fcr31 = ~fcr31; + + bit = (insn.mm_i_format.rs >> 2); + bit += (bit != 0); + bit += 23; + if (fcr31 & (1 << bit)) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + } + break; + case mm_pool16c_op: + switch (insn.mm_i_format.rt) { + case mm_jalr16_op: + case mm_jalrs16_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + /* Fall through */ + case mm_jr16_op: + *contpc = regs->regs[insn.mm_i_format.rs]; + return 1; + } + break; + case mm_beqz16_op: + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_b1_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + case mm_bnez16_op: + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_b1_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + 
dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + case mm_b16_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc + + (insn.mm_b0_format.simmediate << 1); + return 1; + case mm_beq32_op: + if (regs->regs[insn.mm_i_format.rs] == + regs->regs[insn.mm_i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + case mm_bne32_op: + if (regs->regs[insn.mm_i_format.rs] != + regs->regs[insn.mm_i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + case mm_jalx32_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 28; + *contpc <<= 28; + *contpc |= (insn.j_format.target << 2); + return 1; + case mm_jals32_op: + case mm_jal32_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + /* Fall through */ + case mm_j32_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 27; + *contpc <<= 27; + *contpc |= (insn.j_format.target << 1); + set_isa16_mode(*contpc); + return 1; + } + return 0; +} + /* * Compute return address and emulate branch in microMIPS mode after an * exception only. It does not handle compact branches/jumps and cannot @@ -317,7 +513,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == beql_op) + if (insn.i_format.opcode == beql_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -329,7 +525,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == bnel_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -341,7 +537,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] <= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == blezl_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -353,7 +549,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] > 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == bgtzl_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -366,7 +562,11 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case cop1_op: preempt_disable(); if (is_fpu_owner()) - asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + asm volatile( + ".set push\n" + "\t.set mips1\n" + "\tcfc1\t%0,$31\n" + "\t.set pop" : "=r" (fcr31)); else fcr31 = current->thread.fpu.fcr31; preempt_enable(); diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c index 594cbbf16d62..6093716980b9 100644 --- a/arch/mips/kernel/cevt-gic.c +++ b/arch/mips/kernel/cevt-gic.c @@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt) cnt = gic_read_count(); cnt += (u64)delta; - gic_write_compare(cnt); + gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask)); res = ((int)(gic_read_count() - cnt) >= 0) ? 
-ETIME : 0; return res; } @@ -73,7 +73,8 @@ int gic_clockevent_init(void) cd = &per_cpu(gic_clockevent_device, cpu); cd->name = "MIPS GIC"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; + cd->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP; clockevent_set_clock(cd, gic_frequency); diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 50d3f5a8d6bb..bc127e22fdab 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -12,17 +12,10 @@ #include <linux/smp.h> #include <linux/irq.h> -#include <asm/smtc_ipi.h> #include <asm/time.h> #include <asm/cevt-r4k.h> #include <asm/gic.h> -/* - * The SMTC Kernel for the 34K, 1004K, et. al. replaces several - * of these routines with SMTC-specific variants. - */ - -#ifndef CONFIG_MIPS_MT_SMTC static int mips_next_event(unsigned long delta, struct clock_event_device *evt) { @@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta, return res; } -#endif /* CONFIG_MIPS_MT_SMTC */ - void mips_set_clock_mode(enum clock_event_mode mode, struct clock_event_device *evt) { @@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode, DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); int cp0_timer_irq_installed; -#ifndef CONFIG_MIPS_MT_SMTC irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { const int r2 = cpu_has_mips_r2; @@ -72,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) /* Clear Count/Compare Interrupt */ write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); -#ifdef CONFIG_CEVT_GIC - if (!gic_present) -#endif cd->event_handler(cd); } @@ -82,8 +69,6 @@ out: return IRQ_HANDLED; } -#endif /* Not CONFIG_MIPS_MT_SMTC */ - struct irqaction c0_compare_irqaction = { .handler = c0_compare_interrupt, .flags = IRQF_PERCPU | IRQF_TIMER, @@ -170,7 +155,6 @@ int c0_compare_int_usable(void) return 1; } -#ifndef CONFIG_MIPS_MT_SMTC int r4k_clockevent_init(void) { unsigned int cpu = smp_processor_id(); @@ -195,7 +179,9 @@ int r4k_clockevent_init(void) cd = &per_cpu(mips_clockevent_device, cpu); cd->name = "MIPS"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; + cd->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_C3STOP | + CLOCK_EVT_FEAT_PERCPU; clockevent_set_clock(cd, mips_hpt_frequency); @@ -210,9 +196,6 @@ int r4k_clockevent_init(void) cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; -#ifdef CONFIG_CEVT_GIC - if (!gic_present) -#endif clockevents_register_device(cd); if (cp0_timer_irq_installed) @@ -225,4 +208,3 @@ int r4k_clockevent_init(void) return 0; } -#endif /* Not CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c deleted file mode 100644 index b6cf0a60d896..000000000000 --- a/arch/mips/kernel/cevt-smtc.c +++ /dev/null @@ -1,324 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2007 MIPS Technologies, Inc. - * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> - * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl - */ -#include <linux/clockchips.h> -#include <linux/interrupt.h> -#include <linux/percpu.h> -#include <linux/smp.h> -#include <linux/irq.h> - -#include <asm/smtc_ipi.h> -#include <asm/time.h> -#include <asm/cevt-r4k.h> - -/* - * Variant clock event timer support for SMTC on MIPS 34K, 1004K - * or other MIPS MT cores. 
- * - * Notes on SMTC Support: - * - * SMTC has multiple microthread TCs pretending to be Linux CPUs. - * But there's only one Count/Compare pair per VPE, and Compare - * interrupts are taken opportunisitically by available TCs - * bound to the VPE with the Count register. The new timer - * framework provides for global broadcasts, but we really - * want VPE-level multicasts for best behavior. So instead - * of invoking the high-level clock-event broadcast code, - * this version of SMTC support uses the historical SMTC - * multicast mechanisms "under the hood", appearing to the - * generic clock layer as if the interrupts are per-CPU. - * - * The approach taken here is to maintain a set of NR_CPUS - * virtual timers, and track which "CPU" needs to be alerted - * at each event. - * - * It's unlikely that we'll see a MIPS MT core with more than - * 2 VPEs, but we *know* that we won't need to handle more - * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements - * is always going to be overkill, but always going to be enough. - */ - -unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; -static int smtc_nextinvpe[NR_CPUS]; - -/* - * Timestamps stored are absolute values to be programmed - * into Count register. Valid timestamps will never be zero. - * If a Zero Count value is actually calculated, it is converted - * to be a 1, which will introduce 1 or two CPU cycles of error - * roughly once every four billion events, which at 1000 HZ means - * about once every 50 days. If that's actually a problem, one - * could alternate squashing 0 to 1 and to -1. - */ - -#define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) -#define ISVALID(x) ((x) != 0L) - -/* - * Time comparison is subtle, as it's really truncated - * modular arithmetic. - */ - -#define IS_SOONER(a, b, reference) \ - (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) - -/* - * CATCHUP_INCREMENT, used when the function falls behind the counter. - * Could be an increasing function instead of a constant; - */ - -#define CATCHUP_INCREMENT 64 - -static int mips_next_event(unsigned long delta, - struct clock_event_device *evt) -{ - unsigned long flags; - unsigned int mtflags; - unsigned long timestamp, reference, previous; - unsigned long nextcomp = 0L; - int vpe = current_cpu_data.vpe_id; - int cpu = smp_processor_id(); - local_irq_save(flags); - mtflags = dmt(); - - /* - * Maintain the per-TC virtual timer - * and program the per-VPE shared Count register - * as appropriate here... - */ - reference = (unsigned long)read_c0_count(); - timestamp = MAKEVALID(reference + delta); - /* - * To really model the clock, we have to catch the case - * where the current next-in-VPE timestamp is the old - * timestamp for the calling CPE, but the new value is - * in fact later. In that case, we have to do a full - * scan and discover the new next-in-VPE CPU id and - * timestamp. - */ - previous = smtc_nexttime[vpe][cpu]; - if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) - && IS_SOONER(previous, timestamp, reference)) { - int i; - int soonest = cpu; - - /* - * Update timestamp array here, so that new - * value gets considered along with those of - * other virtual CPUs on the VPE. 
- */ - smtc_nexttime[vpe][cpu] = timestamp; - for_each_online_cpu(i) { - if (ISVALID(smtc_nexttime[vpe][i]) - && IS_SOONER(smtc_nexttime[vpe][i], - smtc_nexttime[vpe][soonest], reference)) { - soonest = i; - } - } - smtc_nextinvpe[vpe] = soonest; - nextcomp = smtc_nexttime[vpe][soonest]; - /* - * Otherwise, we don't have to process the whole array rank, - * we just have to see if the event horizon has gotten closer. - */ - } else { - if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || - IS_SOONER(timestamp, - smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { - smtc_nextinvpe[vpe] = cpu; - nextcomp = timestamp; - } - /* - * Since next-in-VPE may me the same as the executing - * virtual CPU, we update the array *after* checking - * its value. - */ - smtc_nexttime[vpe][cpu] = timestamp; - } - - /* - * It may be that, in fact, we don't need to update Compare, - * but if we do, we want to make sure we didn't fall into - * a crack just behind Count. - */ - if (ISVALID(nextcomp)) { - write_c0_compare(nextcomp); - ehb(); - /* - * We never return an error, we just make sure - * that we trigger the handlers as quickly as - * we can if we fell behind. - */ - while ((nextcomp - (unsigned long)read_c0_count()) - > (unsigned long)LONG_MAX) { - nextcomp += CATCHUP_INCREMENT; - write_c0_compare(nextcomp); - ehb(); - } - } - emt(mtflags); - local_irq_restore(flags); - return 0; -} - - -void smtc_distribute_timer(int vpe) -{ - unsigned long flags; - unsigned int mtflags; - int cpu; - struct clock_event_device *cd; - unsigned long nextstamp; - unsigned long reference; - - -repeat: - nextstamp = 0L; - for_each_online_cpu(cpu) { - /* - * Find virtual CPUs within the current VPE who have - * unserviced timer requests whose time is now past. - */ - local_irq_save(flags); - mtflags = dmt(); - if (cpu_data[cpu].vpe_id == vpe && - ISVALID(smtc_nexttime[vpe][cpu])) { - reference = (unsigned long)read_c0_count(); - if ((smtc_nexttime[vpe][cpu] - reference) - > (unsigned long)LONG_MAX) { - smtc_nexttime[vpe][cpu] = 0L; - emt(mtflags); - local_irq_restore(flags); - /* - * We don't send IPIs to ourself. - */ - if (cpu != smp_processor_id()) { - smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); - } else { - cd = &per_cpu(mips_clockevent_device, cpu); - cd->event_handler(cd); - } - } else { - /* Local to VPE but Valid Time not yet reached. 
*/ - if (!ISVALID(nextstamp) || - IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, - reference)) { - smtc_nextinvpe[vpe] = cpu; - nextstamp = smtc_nexttime[vpe][cpu]; - } - emt(mtflags); - local_irq_restore(flags); - } - } else { - emt(mtflags); - local_irq_restore(flags); - - } - } - /* Reprogram for interrupt at next soonest timestamp for VPE */ - if (ISVALID(nextstamp)) { - write_c0_compare(nextstamp); - ehb(); - if ((nextstamp - (unsigned long)read_c0_count()) - > (unsigned long)LONG_MAX) - goto repeat; - } -} - - -irqreturn_t c0_compare_interrupt(int irq, void *dev_id) -{ - int cpu = smp_processor_id(); - - /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ - handle_perf_irq(1); - - if (read_c0_cause() & (1 << 30)) { - /* Clear Count/Compare Interrupt */ - write_c0_compare(read_c0_compare()); - smtc_distribute_timer(cpu_data[cpu].vpe_id); - } - return IRQ_HANDLED; -} - - -int smtc_clockevent_init(void) -{ - uint64_t mips_freq = mips_hpt_frequency; - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - unsigned int irq; - int i; - int j; - - if (!cpu_has_counter || !mips_hpt_frequency) - return -ENXIO; - if (cpu == 0) { - for (i = 0; i < num_possible_cpus(); i++) { - smtc_nextinvpe[i] = 0; - for (j = 0; j < num_possible_cpus(); j++) - smtc_nexttime[i][j] = 0L; - } - /* - * SMTC also can't have the usablility test - * run by secondary TCs once Compare is in use. - */ - if (!c0_compare_int_usable()) - return -ENXIO; - } - - /* - * With vectored interrupts things are getting platform specific. - * get_c0_compare_int is a hook to allow a platform to return the - * interrupt number of it's liking. - */ - irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; - if (get_c0_compare_int) - irq = get_c0_compare_int(); - - cd = &per_cpu(mips_clockevent_device, cpu); - - cd->name = "MIPS"; - cd->features = CLOCK_EVT_FEAT_ONESHOT; - - /* Calculate the min / max delta */ - cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); - cd->shift = 32; - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); - cd->min_delta_ns = clockevent_delta2ns(0x300, cd); - - cd->rating = 300; - cd->irq = irq; - cd->cpumask = cpumask_of(cpu); - cd->set_next_event = mips_next_event; - cd->set_mode = mips_set_clock_mode; - cd->event_handler = mips_event_handler; - - clockevents_register_device(cd); - - /* - * On SMTC we only want to do the data structure - * initialization and IRQ setup once. - */ - if (cpu) - return 0; - /* - * And we need the hwmask associated with the c0_compare - * vector to be initialized. - */ - irq_hwmask[irq] = (0x100 << cp0_compare_irq); - if (cp0_timer_irq_installed) - return 0; - - cp0_timer_irq_installed = 1; - - setup_irq(irq, &c0_compare_irqaction); - - return 0; -} diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S new file mode 100644 index 000000000000..6f4f739dad96 --- /dev/null +++ b/arch/mips/kernel/cps-vec.S @@ -0,0 +1,487 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <asm/addrspace.h> +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/asmmacro.h> +#include <asm/cacheops.h> +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/pm.h> + +#define GCR_CL_COHERENCE_OFS 0x2008 +#define GCR_CL_ID_OFS 0x2028 + +.extern mips_cm_base + +.set noreorder + + /* + * Set dest to non-zero if the core supports the MT ASE, else zero. If + * MT is not supported then branch to nomt. + */ + .macro has_mt dest, nomt + mfc0 \dest, CP0_CONFIG + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 1 + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 2 + bgez \dest, \nomt + mfc0 \dest, CP0_CONFIG, 3 + andi \dest, \dest, MIPS_CONF3_MT + beqz \dest, \nomt + .endm + +.section .text.cps-vec +.balign 0x1000 + +LEAF(mips_cps_core_entry) + /* + * These first 12 bytes will be patched by cps_smp_setup to load the + * base address of the CM GCRs into register v1 and the CCA to use into + * register s0. + */ + .quad 0 + .word 0 + + /* Check whether we're here due to an NMI */ + mfc0 k0, CP0_STATUS + and k0, k0, ST0_NMI + beqz k0, not_nmi + nop + + /* This is an NMI */ + la k0, nmi_handler + jr k0 + nop + +not_nmi: + /* Setup Cause */ + li t0, CAUSEF_IV + mtc0 t0, CP0_CAUSE + + /* Setup Status */ + li t0, ST0_CU1 | ST0_CU0 + mtc0 t0, CP0_STATUS + + /* + * Clear the bits used to index the caches. Note that the architecture + * dictates that writing to any of TagLo or TagHi selects 0 or 2 should + * be valid for all MIPS32 CPUs, even those for which said writes are + * unnecessary. + */ + mtc0 zero, CP0_TAGLO, 0 + mtc0 zero, CP0_TAGHI, 0 + mtc0 zero, CP0_TAGLO, 2 + mtc0 zero, CP0_TAGHI, 2 + ehb + + /* Primary cache configuration is indicated by Config1 */ + mfc0 v0, CP0_CONFIG, 1 + + /* Detect I-cache line size */ + _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ + beqz t0, icache_done + li t1, 2 + sllv t0, t1, t0 + + /* Detect I-cache size */ + _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ + xori t2, t1, 0x7 + beqz t2, 1f + li t3, 32 + addi t1, t1, 1 + sllv t1, t3, t1 +1: /* At this point t1 == I-cache sets per way */ + _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ + addi t2, t2, 1 + mul t1, t1, t0 + mul t1, t1, t2 + + li a0, KSEG0 + add a1, a0, t1 +1: cache Index_Store_Tag_I, 0(a0) + add a0, a0, t0 + bne a0, a1, 1b + nop +icache_done: + + /* Detect D-cache line size */ + _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ + beqz t0, dcache_done + li t1, 2 + sllv t0, t1, t0 + + /* Detect D-cache size */ + _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ + xori t2, t1, 0x7 + beqz t2, 1f + li t3, 32 + addi t1, t1, 1 + sllv t1, t3, t1 +1: /* At this point t1 == D-cache sets per way */ + _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ + addi t2, t2, 1 + mul t1, t1, t0 + mul t1, t1, t2 + + li a0, KSEG0 + addu a1, a0, t1 + subu a1, a1, t0 +1: cache Index_Store_Tag_D, 0(a0) + bne a0, a1, 1b + add a0, a0, t0 +dcache_done: + + /* Set Kseg0 CCA to that in s0 */ + mfc0 t0, CP0_CONFIG + ori t0, 0x7 + xori t0, 0x7 + or t0, t0, s0 + mtc0 t0, CP0_CONFIG + ehb + + /* Enter the coherent domain */ + li t0, 0xff + sw t0, GCR_CL_COHERENCE_OFS(v1) + ehb + + /* Jump to kseg0 */ + la t0, 1f + jr t0 + nop + + /* + * We're up, cached & coherent. Perform any further required core-level + * initialisation. + */ +1: jal mips_cps_core_init + nop + + /* + * Boot any other VPEs within this core that should be online, and + * deactivate this VPE if it should be offline. + */ + jal mips_cps_boot_vpes + nop + + /* Off we go! 
*/ + lw t1, VPEBOOTCFG_PC(v0) + lw gp, VPEBOOTCFG_GP(v0) + lw sp, VPEBOOTCFG_SP(v0) + jr t1 + nop + END(mips_cps_core_entry) + +.org 0x200 +LEAF(excep_tlbfill) + b . + nop + END(excep_tlbfill) + +.org 0x280 +LEAF(excep_xtlbfill) + b . + nop + END(excep_xtlbfill) + +.org 0x300 +LEAF(excep_cache) + b . + nop + END(excep_cache) + +.org 0x380 +LEAF(excep_genex) + b . + nop + END(excep_genex) + +.org 0x400 +LEAF(excep_intex) + b . + nop + END(excep_intex) + +.org 0x480 +LEAF(excep_ejtag) + la k0, ejtag_debug_handler + jr k0 + nop + END(excep_ejtag) + +LEAF(mips_cps_core_init) +#ifdef CONFIG_MIPS_MT + /* Check that the core implements the MT ASE */ + has_mt t0, 3f + nop + + .set push + .set mt + + /* Only allow 1 TC per VPE to execute... */ + dmt + + /* ...and for the moment only 1 VPE */ + dvpe + la t1, 1f + jr.hb t1 + nop + + /* Enter VPE configuration state */ +1: mfc0 t0, CP0_MVPCONTROL + ori t0, t0, MVPCONTROL_VPC + mtc0 t0, CP0_MVPCONTROL + + /* Retrieve the number of VPEs within the core */ + mfc0 t0, CP0_MVPCONF0 + srl t0, t0, MVPCONF0_PVPE_SHIFT + andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) + addi t7, t0, 1 + + /* If there's only 1, we're done */ + beqz t0, 2f + nop + + /* Loop through each VPE within this core */ + li t5, 1 + +1: /* Operate on the appropriate TC */ + mtc0 t5, CP0_VPECONTROL + ehb + + /* Bind TC to VPE (1:1 TC:VPE mapping) */ + mttc0 t5, CP0_TCBIND + + /* Set exclusive TC, non-active, master */ + li t0, VPECONF0_MVP + sll t1, t5, VPECONF0_XTC_SHIFT + or t0, t0, t1 + mttc0 t0, CP0_VPECONF0 + + /* Set TC non-active, non-allocatable */ + mttc0 zero, CP0_TCSTATUS + + /* Set TC halted */ + li t0, TCHALT_H + mttc0 t0, CP0_TCHALT + + /* Next VPE */ + addi t5, t5, 1 + slt t0, t5, t7 + bnez t0, 1b + nop + + /* Leave VPE configuration state */ +2: mfc0 t0, CP0_MVPCONTROL + xori t0, t0, MVPCONTROL_VPC + mtc0 t0, CP0_MVPCONTROL + +3: .set pop +#endif + jr ra + nop + END(mips_cps_core_init) + +LEAF(mips_cps_boot_vpes) + /* Retrieve CM base address */ + la t0, mips_cm_base + lw t0, 0(t0) + + /* Calculate a pointer to this cores struct core_boot_config */ + lw t0, GCR_CL_ID_OFS(t0) + li t1, COREBOOTCFG_SIZE + mul t0, t0, t1 + la t1, mips_cps_core_bootcfg + lw t1, 0(t1) + addu t0, t0, t1 + + /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ + has_mt t6, 1f + li t9, 0 + + /* Find the number of VPEs present in the core */ + mfc0 t1, CP0_MVPCONF0 + srl t1, t1, MVPCONF0_PVPE_SHIFT + andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT + addi t1, t1, 1 + + /* Calculate a mask for the VPE ID from EBase.CPUNum */ + clz t1, t1 + li t2, 31 + subu t1, t2, t1 + li t2, 1 + sll t1, t2, t1 + addiu t1, t1, -1 + + /* Retrieve the VPE ID from EBase.CPUNum */ + mfc0 t9, $15, 1 + and t9, t9, t1 + +1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ + li t1, VPEBOOTCFG_SIZE + mul v0, t9, t1 + lw t7, COREBOOTCFG_VPECONFIG(t0) + addu v0, v0, t7 + +#ifdef CONFIG_MIPS_MT + + /* If the core doesn't support MT then return */ + bnez t6, 1f + nop + jr ra + nop + + .set push + .set mt + +1: /* Enter VPE configuration state */ + dvpe + la t1, 1f + jr.hb t1 + nop +1: mfc0 t1, CP0_MVPCONTROL + ori t1, t1, MVPCONTROL_VPC + mtc0 t1, CP0_MVPCONTROL + ehb + + /* Loop through each VPE */ + lw t6, COREBOOTCFG_VPEMASK(t0) + move t8, t6 + li t5, 0 + + /* Check whether the VPE should be running. 
If not, skip it */ +1: andi t0, t6, 1 + beqz t0, 2f + nop + + /* Operate on the appropriate TC */ + mfc0 t0, CP0_VPECONTROL + ori t0, t0, VPECONTROL_TARGTC + xori t0, t0, VPECONTROL_TARGTC + or t0, t0, t5 + mtc0 t0, CP0_VPECONTROL + ehb + + /* Skip the VPE if its TC is not halted */ + mftc0 t0, CP0_TCHALT + beqz t0, 2f + nop + + /* Calculate a pointer to the VPEs struct vpe_boot_config */ + li t0, VPEBOOTCFG_SIZE + mul t0, t0, t5 + addu t0, t0, t7 + + /* Set the TC restart PC */ + lw t1, VPEBOOTCFG_PC(t0) + mttc0 t1, CP0_TCRESTART + + /* Set the TC stack pointer */ + lw t1, VPEBOOTCFG_SP(t0) + mttgpr t1, sp + + /* Set the TC global pointer */ + lw t1, VPEBOOTCFG_GP(t0) + mttgpr t1, gp + + /* Copy config from this VPE */ + mfc0 t0, CP0_CONFIG + mttc0 t0, CP0_CONFIG + + /* Ensure no software interrupts are pending */ + mttc0 zero, CP0_CAUSE + mttc0 zero, CP0_STATUS + + /* Set TC active, not interrupt exempt */ + mftc0 t0, CP0_TCSTATUS + li t1, ~TCSTATUS_IXMT + and t0, t0, t1 + ori t0, t0, TCSTATUS_A + mttc0 t0, CP0_TCSTATUS + + /* Clear the TC halt bit */ + mttc0 zero, CP0_TCHALT + + /* Set VPE active */ + mftc0 t0, CP0_VPECONF0 + ori t0, t0, VPECONF0_VPA + mttc0 t0, CP0_VPECONF0 + + /* Next VPE */ +2: srl t6, t6, 1 + addi t5, t5, 1 + bnez t6, 1b + nop + + /* Leave VPE configuration state */ + mfc0 t1, CP0_MVPCONTROL + xori t1, t1, MVPCONTROL_VPC + mtc0 t1, CP0_MVPCONTROL + ehb + evpe + + /* Check whether this VPE is meant to be running */ + li t0, 1 + sll t0, t0, t9 + and t0, t0, t8 + bnez t0, 2f + nop + + /* This VPE should be offline, halt the TC */ + li t0, TCHALT_H + mtc0 t0, CP0_TCHALT + la t0, 1f +1: jr.hb t0 + nop + +2: .set pop + +#endif /* CONFIG_MIPS_MT */ + + /* Return */ + jr ra + nop + END(mips_cps_boot_vpes) + +#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) + + /* Calculate a pointer to this CPUs struct mips_static_suspend_state */ + .macro psstate dest + .set push + .set noat + lw $1, TI_CPU(gp) + sll $1, $1, LONGLOG + la \dest, __per_cpu_offset + addu $1, $1, \dest + lw $1, 0($1) + la \dest, cps_cpu_state + addu \dest, \dest, $1 + .set pop + .endm + +LEAF(mips_cps_pm_save) + /* Save CPU state */ + SUSPEND_SAVE_REGS + psstate t1 + SUSPEND_SAVE_STATIC + jr v0 + nop + END(mips_cps_pm_save) + +LEAF(mips_cps_pm_restore) + /* Restore CPU state */ + psstate t1 + RESUME_RESTORE_STATIC + RESUME_RESTORE_REGS_RETURN + END(mips_cps_pm_restore) + +#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index c814287bdf5d..d74f957c561e 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -23,6 +23,8 @@ #include <asm/cpu-type.h> #include <asm/fpu.h> #include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/msa.h> #include <asm/watch.h> #include <asm/elf.h> #include <asm/spram.h> @@ -60,7 +62,7 @@ static inline void check_errata(void) case CPU_34K: /* * Erratum "RPS May Cause Incorrect Instruction Execution" - * This code only handles VPE0, any SMP/SMTC/RTOS code + * This code only handles VPE0, any SMP/RTOS code * making use of VPE1 will be responsable for that VPE. 
*/ if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) @@ -112,7 +114,7 @@ static inline unsigned long cpu_get_fpu_id(void) unsigned long tmp, fpu_id; tmp = read_c0_status(); - __enable_fpu(); + __enable_fpu(FPU_AS_IS); fpu_id = read_32bit_cp1_register(CP1_REVISION); write_c0_status(tmp); return fpu_id; @@ -126,6 +128,20 @@ static inline int __cpu_has_fpu(void) return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE); } +static inline unsigned long cpu_get_msa_id(void) +{ + unsigned long status, conf5, msa_id; + + status = read_c0_status(); + __enable_fpu(FPU_64BIT); + conf5 = read_c0_config5(); + enable_msa(); + msa_id = read_msa_ir(); + write_c0_config5(conf5); + write_c0_status(status); + return msa_id; +} + static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) { #ifdef __NEED_VMBITS_PROBE @@ -163,6 +179,27 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) static char unknown_isa[] = KERN_ERR \ "Unsupported ISA type, c0.config0: %d."; +static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) +{ + unsigned int config6; + + /* It's implementation dependent how the FTLB can be enabled */ + switch (c->cputype) { + case CPU_PROAPTIV: + case CPU_P5600: + /* proAptiv & related cores use Config6 to enable the FTLB */ + config6 = read_c0_config6(); + if (enable) + /* Enable FTLB */ + write_c0_config6(config6 | MIPS_CONF6_FTLBEN); + else + /* Disable FTLB */ + write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); + back_to_back_c0_hazard(); + break; + } +} + static inline unsigned int decode_config0(struct cpuinfo_mips *c) { unsigned int config0; @@ -170,8 +207,13 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) config0 = read_c0_config(); - if (((config0 & MIPS_CONF_MT) >> 7) == 1) + /* + * Look for Standard TLB or Dual VTLB and FTLB + */ + if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || + (((config0 & MIPS_CONF_MT) >> 7) == 4)) c->options |= MIPS_CPU_TLB; + isa = (config0 & MIPS_CONF_AT) >> 13; switch (isa) { case 0: @@ -226,8 +268,11 @@ static inline unsigned int decode_config1(struct cpuinfo_mips *c) c->options |= MIPS_CPU_FPU; c->options |= MIPS_CPU_32FPR; } - if (cpu_has_tlb) + if (cpu_has_tlb) { c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1; + c->tlbsizevtlb = c->tlbsize; + c->tlbsizeftlbsets = 0; + } return config1 & MIPS_CONF_M; } @@ -272,6 +317,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) c->options |= MIPS_CPU_MICROMIPS; if (config3 & MIPS_CONF3_VZ) c->ases |= MIPS_ASE_VZ; + if (config3 & MIPS_CONF3_SC) + c->options |= MIPS_CPU_SEGMENTS; + if (config3 & MIPS_CONF3_MSA) + c->ases |= MIPS_ASE_MSA; return config3 & MIPS_CONF_M; } @@ -279,12 +328,51 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) static inline unsigned int decode_config4(struct cpuinfo_mips *c) { unsigned int config4; + unsigned int newcf4; + unsigned int mmuextdef; + unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE; config4 = read_c0_config4(); - if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT - && cpu_has_tlb) - c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; + if (cpu_has_tlb) { + if (((config4 & MIPS_CONF4_IE) >> 29) == 2) + c->options |= MIPS_CPU_TLBINV; + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + switch (mmuextdef) { + case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: + c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; + c->tlbsizevtlb = c->tlbsize; + break; + case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT: + c->tlbsizevtlb += + ((config4 & MIPS_CONF4_VTLBSIZEEXT) >> + 
MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40; + c->tlbsize = c->tlbsizevtlb; + ftlb_page = MIPS_CONF4_VFTLBPAGESIZE; + /* fall through */ + case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT: + newcf4 = (config4 & ~ftlb_page) | + (page_size_ftlb(mmuextdef) << + MIPS_CONF4_FTLBPAGESIZE_SHIFT); + write_c0_config4(newcf4); + back_to_back_c0_hazard(); + config4 = read_c0_config4(); + if (config4 != newcf4) { + pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n", + PAGE_SIZE, config4); + /* Switch FTLB off */ + set_ftlb_enable(c, 0); + break; + } + c->tlbsizeftlbsets = 1 << + ((config4 & MIPS_CONF4_FTLBSETS) >> + MIPS_CONF4_FTLBSETS_SHIFT); + c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >> + MIPS_CONF4_FTLBWAYS_SHIFT) + 2; + c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets; + break; + } + } c->kscratch_mask = (config4 >> 16) & 0xff; @@ -299,6 +387,9 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) config5 &= ~MIPS_CONF5_UFR; write_c0_config5(config5); + if (config5 & MIPS_CONF5_EVA) + c->options |= MIPS_CPU_EVA; + return config5 & MIPS_CONF_M; } @@ -312,6 +403,9 @@ static void decode_configs(struct cpuinfo_mips *c) c->scache.flags = MIPS_CACHE_NOT_PRESENT; + /* Enable FTLB if present */ + set_ftlb_enable(c, 1); + ok = decode_config0(c); /* Read Config registers. */ BUG_ON(!ok); /* Arch spec violation! */ if (ok) @@ -327,8 +421,13 @@ static void decode_configs(struct cpuinfo_mips *c) mips_probe_watch_registers(c); - if (cpu_has_mips_r2) - c->core = read_c0_ebase() & 0x3ff; +#ifndef CONFIG_MIPS_CPS + if (cpu_has_mips_r2) { + c->core = get_ebase_cpunum(); + if (cpu_has_mipsmt) + c->core >>= fls(core_nvpes()) - 1; + } +#endif } #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \ @@ -585,21 +684,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) */ c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; break; - case PRID_IMP_RM9000: - c->cputype = CPU_RM9000; - __cpu_name[cpu] = "RM9000"; - set_isa(c, MIPS_CPU_ISA_IV); - c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | - MIPS_CPU_LLSC; - /* - * Bit 29 in the info register of the RM9000 - * indicates if the TLB has 48 or 64 entries. - * - * 29 1 => 64 entry JTLB - * 0 => 48 entry JTLB - */ - c->tlbsize = (read_c0_info() & (1 << 29)) ? 
64 : 48; - break; case PRID_IMP_R8000: c->cputype = CPU_R8000; __cpu_name[cpu] = "RM8000"; @@ -639,17 +723,23 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) MIPS_CPU_LLSC; c->tlbsize = 64; break; - case PRID_IMP_LOONGSON2: - c->cputype = CPU_LOONGSON2; - __cpu_name[cpu] = "ICT Loongson-2"; - + case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */ switch (c->processor_id & PRID_REV_MASK) { case PRID_REV_LOONGSON2E: + c->cputype = CPU_LOONGSON2; + __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2e"); break; case PRID_REV_LOONGSON2F: + c->cputype = CPU_LOONGSON2; + __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2f"); break; + case PRID_REV_LOONGSON3A: + c->cputype = CPU_LOONGSON3; + __cpu_name[cpu] = "ICT Loongson-3"; + set_elf_platform(cpu, "loongson3a"); + break; } set_isa(c, MIPS_CPU_ISA_III); @@ -658,7 +748,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) MIPS_CPU_32FPR; c->tlbsize = 64; break; - case PRID_IMP_LOONGSON1: + case PRID_IMP_LOONGSON_32: /* Loongson-1 */ decode_configs(c); c->cputype = CPU_LOONGSON1; @@ -675,7 +765,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) { - decode_configs(c); switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_4KC: c->cputype = CPU_4KC; @@ -736,11 +825,37 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "MIPS 1004Kc"; break; case PRID_IMP_1074K: - c->cputype = CPU_74K; + c->cputype = CPU_1074K; __cpu_name[cpu] = "MIPS 1074Kc"; break; + case PRID_IMP_INTERAPTIV_UP: + c->cputype = CPU_INTERAPTIV; + __cpu_name[cpu] = "MIPS interAptiv"; + break; + case PRID_IMP_INTERAPTIV_MP: + c->cputype = CPU_INTERAPTIV; + __cpu_name[cpu] = "MIPS interAptiv (multi)"; + break; + case PRID_IMP_PROAPTIV_UP: + c->cputype = CPU_PROAPTIV; + __cpu_name[cpu] = "MIPS proAptiv"; + break; + case PRID_IMP_PROAPTIV_MP: + c->cputype = CPU_PROAPTIV; + __cpu_name[cpu] = "MIPS proAptiv (multi)"; + break; + case PRID_IMP_P5600: + c->cputype = CPU_P5600; + __cpu_name[cpu] = "MIPS P5600"; + break; + case PRID_IMP_M5150: + c->cputype = CPU_M5150; + __cpu_name[cpu] = "MIPS M5150"; + break; } + decode_configs(c); + spram_config(); } @@ -911,6 +1026,7 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) decode_configs(c); /* JZRISC does not implement the CP0 counter. 
*/ c->options &= ~MIPS_CPU_COUNTER; + BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter); switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_JZRISC: c->cputype = CPU_JZRISC; @@ -943,6 +1059,8 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_NETLOGIC_XLP2XX: + case PRID_IMP_NETLOGIC_XLP9XX: + case PRID_IMP_NETLOGIC_XLP5XX: c->cputype = CPU_XLP; __cpu_name[cpu] = "Broadcom XLPII"; break; @@ -1087,6 +1205,12 @@ void cpu_probe(void) else c->srsets = 1; + if (cpu_has_msa) { + c->msa_id = cpu_get_msa_id(); + WARN(c->msa_id & MSA_IR_WRPF, + "Vector register partitioning unimplemented!"); + } + cpu_probe_vmbits(c); #ifdef CONFIG_64BIT @@ -1103,4 +1227,6 @@ void cpu_report(void) smp_processor_id(), c->processor_id, cpu_name_string()); if (c->options & MIPS_CPU_FPU) printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id); + if (cpu_has_msa) + pr_info("MSA revision is: %08x\n", c->msa_id); } diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 93aa302948d7..d21264681e97 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -5,7 +5,6 @@ #include <linux/bootmem.h> #include <linux/crash_dump.h> #include <linux/delay.h> -#include <linux/init.h> #include <linux/irq.h> #include <linux/types.h> #include <linux/sched.h> diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index e5786858cdb6..4353d323f017 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -16,9 +16,6 @@ #include <asm/isadep.h> #include <asm/thread_info.h> #include <asm/war.h> -#ifdef CONFIG_MIPS_MT_SMTC -#include <asm/mipsmtregs.h> -#endif #ifndef CONFIG_PREEMPT #define resume_kernel restore_all @@ -89,41 +86,6 @@ FEXPORT(syscall_exit) bnez t0, syscall_exit_work restore_all: # restore full frame -#ifdef CONFIG_MIPS_MT_SMTC -#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP -/* Re-arm any temporarily masked interrupts not explicitly "acked" */ - mfc0 v0, CP0_TCSTATUS - ori v1, v0, TCSTATUS_IXMT - mtc0 v1, CP0_TCSTATUS - andi v0, TCSTATUS_IXMT - _ehb - mfc0 t0, CP0_TCCONTEXT - DMT 9 # dmt t1 - jal mips_ihb - mfc0 t2, CP0_STATUS - andi t3, t0, 0xff00 - or t2, t2, t3 - mtc0 t2, CP0_STATUS - _ehb - andi t1, t1, VPECONTROL_TE - beqz t1, 1f - EMT -1: - mfc0 v1, CP0_TCSTATUS - /* We set IXMT above, XOR should clear it here */ - xori v1, v1, TCSTATUS_IXMT - or v1, v0, v1 - mtc0 v1, CP0_TCSTATUS - _ehb - xor t0, t0, t3 - mtc0 t0, CP0_TCCONTEXT -#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ -/* Detect and execute deferred IPI "interrupts" */ - LONG_L s0, TI_REGS($28) - LONG_S sp, TI_REGS($28) - jal deferred_smtc_ipi - LONG_S s0, TI_REGS($28) -#endif /* CONFIG_MIPS_MT_SMTC */ .set noat RESTORE_TEMP RESTORE_AT diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 185ba258361b..60e7e5e45af1 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -90,6 +90,7 @@ static inline void ftrace_dyn_arch_init_insns(void) static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { int faulted; + mm_segment_t old_fs; /* *(unsigned int *)ip = new_code; */ safe_store_code(new_code, ip, faulted); @@ -97,7 +98,10 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) if (unlikely(faulted)) return -EFAULT; + old_fs = get_fs(); + set_fs(get_ds()); flush_icache_range(ip, ip + 8); + set_fs(old_fs); return 0; } @@ -111,11 +115,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, safe_store_code(new_code1, ip, faulted); if 
(unlikely(faulted)) return -EFAULT; - ip += 4; - safe_store_code(new_code2, ip, faulted); + safe_store_code(new_code2, ip + 4, faulted); if (unlikely(faulted)) return -EFAULT; - flush_icache_range(ip, ip + 8); /* original ip + 12 */ + flush_icache_range(ip, ip + 8); return 0; } #endif @@ -198,7 +201,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return ftrace_modify_code(FTRACE_CALL_IP, new); } -int __init ftrace_dyn_arch_init(void *data) +int __init ftrace_dyn_arch_init(void) { /* Encode the instructions when booting */ ftrace_dyn_arch_init_insns(); @@ -206,9 +209,6 @@ int __init ftrace_dyn_arch_init(void *data) /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */ ftrace_modify_code(MCOUNT_ADDR, INSN_NOP); - /* The return code is retured via data */ - *(unsigned long *)data = 0; - return 0; } #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 47d7583cd67f..ac35e12cb1f3 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -21,20 +21,6 @@ #include <asm/war.h> #include <asm/thread_info.h> -#ifdef CONFIG_MIPS_MT_SMTC -#define PANIC_PIC(msg) \ - .set push; \ - .set nomicromips; \ - .set reorder; \ - PTR_LA a0,8f; \ - .set noat; \ - PTR_LA AT, panic; \ - jr AT; \ -9: b 9b; \ - .set pop; \ - TEXT(msg) -#endif - __INIT /* @@ -67,7 +53,7 @@ NESTED(except_vec3_generic, 0, sp) */ NESTED(except_vec3_r4000, 0, sp) .set push - .set mips3 + .set arch=r4000 .set noat mfc0 k1, CP0_CAUSE li k0, 31<<2 @@ -139,7 +125,7 @@ LEAF(__r4k_wait) nop nop #endif - .set mips3 + .set arch=r4000 wait /* end of rollback region (the region size must be power of two) */ 1: @@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp) SAVE_AT .set push .set noreorder -#ifdef CONFIG_MIPS_MT_SMTC - /* - * To keep from blindly blocking *all* interrupts - * during service by SMTC kernel, we also want to - * pass the IM value to be cleared. - */ -FEXPORT(except_vec_vi_mori) - ori a0, $0, 0 -#endif /* CONFIG_MIPS_MT_SMTC */ PTR_LA v1, except_vec_vi_handler FEXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ @@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end) NESTED(except_vec_vi_handler, 0, sp) SAVE_TEMP SAVE_STATIC -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC has an interesting problem that interrupts are level-triggered, - * and the CLI macro will clear EXL, potentially causing a duplicate - * interrupt service invocation. So we need to clear the associated - * IM bit of Status prior to doing CLI, and restore it after the - * service routine has been invoked - we must assume that the - * service routine will have cleared the state, and any active - * level represents a new or otherwised unserviced event... 
- */ - mfc0 t1, CP0_STATUS - and t0, a0, t1 -#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP - mfc0 t2, CP0_TCCONTEXT - or t2, t0, t2 - mtc0 t2, CP0_TCCONTEXT -#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ - xor t1, t1, t0 - mtc0 t1, CP0_STATUS - _ehb -#endif /* CONFIG_MIPS_MT_SMTC */ CLI #ifdef CONFIG_TRACE_IRQFLAGS move s0, v0 -#ifdef CONFIG_MIPS_MT_SMTC - move s1, a0 -#endif TRACE_IRQS_OFF -#ifdef CONFIG_MIPS_MT_SMTC - move a0, s1 -#endif move v0, s0 #endif @@ -475,7 +425,10 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER cpu cpu sti silent /* #11 */ BUILD_HANDLER ov ov sti silent /* #12 */ BUILD_HANDLER tr tr sti silent /* #13 */ + BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */ BUILD_HANDLER fpe fpe fpe silent /* #15 */ + BUILD_HANDLER ftlb ftlb none silent /* #16 */ + BUILD_HANDLER msa msa sti silent /* #21 */ BUILD_HANDLER mdmx mdmx sti silent /* #22 */ #ifdef CONFIG_HARDWARE_WATCHPOINTS /* @@ -493,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp) .align 5 LEAF(handle_ri_rdhwr_vivt) -#ifdef CONFIG_MIPS_MT_SMTC - PANIC_PIC("handle_ri_rdhwr_vivt called") -#else .set push .set noat .set noreorder @@ -514,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp) .set pop bltz k1, handle_ri /* slow path */ /* fall thru */ -#endif END(handle_ri_rdhwr_vivt) LEAF(handle_ri_rdhwr) @@ -574,7 +523,7 @@ isrdhwr: ori k1, _THREAD_MASK xori k1, _THREAD_MASK LONG_L v1, TI_TP_VALUE(k1) - .set mips3 + .set arch=r4000 eret .set mips0 #endif diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index 7b6a5b3e3acf..95afd663cd45 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -35,33 +35,12 @@ */ .macro setup_c0_status set clr .set push -#ifdef CONFIG_MIPS_MT_SMTC - /* - * For SMTC, we need to set privilege and disable interrupts only for - * the current TC, using the TCStatus register. - */ - mfc0 t0, CP0_TCSTATUS - /* Fortunately CU 0 is in the same place in both registers */ - /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */ - li t1, ST0_CU0 | 0x08001c00 - or t0, t1 - /* Clear TKSU, leave IXMT */ - xori t0, 0x00001800 - mtc0 t0, CP0_TCSTATUS - _ehb - /* We need to leave the global IE bit set, but clear EXL...*/ - mfc0 t0, CP0_STATUS - or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr - xor t0, ST0_EXL | ST0_ERL | \clr - mtc0 t0, CP0_STATUS -#else mfc0 t0, CP0_STATUS or t0, ST0_CU0|\set|0x1f|\clr xor t0, 0x1f|\clr mtc0 t0, CP0_STATUS .set noreorder sll zero,3 # ehb -#endif .set pop .endm @@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point jr t0 0: -#ifdef CONFIG_MIPS_MT_SMTC - /* - * In SMTC kernel, "CLI" is thread-specific, in TCStatus. - * We still need to enable interrupts globally in Status, - * and clear EXL/ERL. - * - * TCContext is used to track interrupt levels under - * service in SMTC kernel. Clear for boot TC before - * allowing any interrupts. - */ - mtc0 zero, CP0_TCCONTEXT - - mfc0 t0, CP0_STATUS - ori t0, t0, 0xff1f - xori t0, t0, 0x001e - mtc0 t0, CP0_STATUS -#endif /* CONFIG_MIPS_MT_SMTC */ - PTR_LA t0, __bss_start # clear .bss LONG_S zero, (t0) PTR_LA t1, __bss_stop - LONGSIZE @@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point * function after setting up the stack and gp registers. */ NESTED(smp_bootstrap, 16, sp) -#ifdef CONFIG_MIPS_MT_SMTC - /* - * Read-modify-writes of Status must be atomic, and this - * is one case where CLI is invoked without EXL being - * necessarily set. The CLI and setup_c0_status will - * in fact be redundant for all but the first TC of - * each VPE being booted. 
- */ - DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ - jal mips_ihb -#endif /* CONFIG_MIPS_MT_SMTC */ - setup_c0_status_sec smp_slave_setup -#ifdef CONFIG_MIPS_MT_SMTC - andi t2, t2, VPECONTROL_TE - beqz t2, 2f - EMT # emt -2: -#endif /* CONFIG_MIPS_MT_SMTC */ + setup_c0_status_sec j start_secondary END(smp_bootstrap) #endif /* CONFIG_SMP */ diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 2b91fe80c436..50b364897dda 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = { .irq_disable = disable_8259A_irq, .irq_unmask = enable_8259A_irq, .irq_mask_ack = mask_and_ack_8259A, -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF - .irq_set_affinity = plat_set_irq_affinity, -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ }; /* @@ -180,7 +177,6 @@ handle_real_irq: outb(cached_master_mask, PIC_MASTER_IMR); outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ } - smtc_im_ack_irq(irq); raw_spin_unlock_irqrestore(&i8259A_lock, flags); return; diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index f7991d95bff9..09ce45980758 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -64,7 +64,7 @@ void r4k_wait_irqoff(void) if (!need_resched()) __asm__( " .set push \n" - " .set mips3 \n" + " .set arch=r4000 \n" " wait \n" " .set pop \n"); local_irq_enable(); @@ -82,7 +82,7 @@ static void rm7k_wait_irqoff(void) if (!need_resched()) __asm__( " .set push \n" - " .set mips3 \n" + " .set arch=r4000 \n" " .set noat \n" " mfc0 $1, $12 \n" " sync \n" @@ -103,7 +103,7 @@ static void au1k_wait(void) unsigned long c0status = read_c0_status() | 1; /* irqs on */ __asm__( - " .set mips3 \n" + " .set arch=r4000 \n" " cache 0x14, 0(%0) \n" " cache 0x14, 32(%0) \n" " sync \n" @@ -184,6 +184,11 @@ void __init check_wait(void) case CPU_24K: case CPU_34K: case CPU_1004K: + case CPU_1074K: + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + case CPU_P5600: + case CPU_M5150: cpu_wait = r4k_wait; if (read_c0_config7() & MIPS_CONF7_WII) cpu_wait = r4k_wait_irqoff; @@ -219,29 +224,26 @@ void __init check_wait(void) cpu_wait = r4k_wait; */ break; - case CPU_RM9000: - if ((c->processor_id & 0x00ff) >= 0x40) - cpu_wait = r4k_wait; - break; default: break; } } -static void smtc_idle_hook(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_idle_loop_hook(void); - - smtc_idle_loop_hook(); -#endif -} - void arch_cpu_idle(void) { - smtc_idle_hook(); if (cpu_wait) cpu_wait(); else local_irq_enable(); } + +#ifdef CONFIG_CPU_IDLE + +int mips_cpuidle_wait_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + arch_cpu_idle(); + return index; +} + +#endif diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 5b5ddb231f26..88e4c323382c 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -16,7 +16,6 @@ #include <asm/gic.h> #include <asm/setup.h> #include <asm/traps.h> -#include <asm/gcmpregs.h> #include <linux/hardirq.h> #include <asm-generic/bitops/find.h> @@ -55,6 +54,21 @@ void gic_write_compare(cycle_t cnt) (int)(cnt & 0xffffffff)); } +void gic_write_cpu_compare(cycle_t cnt, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu); + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI), + (int)(cnt >> 32)); + GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO), + (int)(cnt & 0xffffffff)); + + local_irq_restore(flags); +} + cycle_t gic_read_compare(void) { unsigned int hi, lo; diff --git 
a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index fab40f7d2e03..a734b2c2f9ea 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d) */ static void level_mask_and_ack_msc_irq(struct irq_data *d) { - unsigned int irq = d->irq; - mask_msc_irq(d); if (!cpu_has_veic) MSCIC_WRITE(MSC01_IC_EOI, 0); - /* This actually needs to be a call into platform code */ - smtc_im_ack_irq(irq); } /* @@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d) MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); } - smtc_im_ack_irq(irq); } /* @@ -131,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma board_bind_eic_interrupt = &msc_bind_eic_interrupt; - for (; nirq >= 0; nirq--, imp++) { + for (; nirq > 0; nirq--, imp++) { int n = imp->im_irq; switch (imp->im_type) { diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index d1fea7a054be..d2bfbc2e8995 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -62,9 +62,9 @@ void __init alloc_legacy_irqno(void) void free_irqno(unsigned int irq) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(irq, irq_map); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); } /* @@ -73,7 +73,6 @@ void free_irqno(unsigned int irq) */ void ack_bad_irq(unsigned int irq) { - smtc_im_ack_irq(irq); printk("unexpected IRQ # %d\n", irq); } @@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq) { irq_enter(); check_stack_overflow(); - if (!smtc_handle_on_other_cpu(irq)) - generic_handle_irq(irq); - irq_exit(); -} - -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF -/* - * To avoid inefficient and in some cases pathological re-checking of - * IRQ affinity, we have this variant that skips the affinity check. - */ - -void __irq_entry do_IRQ_no_affinity(unsigned int irq) -{ - irq_enter(); - smtc_im_backstop(irq); generic_handle_irq(irq); irq_exit(); } -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index fcaac2f132f0..7afcc2f22c0d 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -32,6 +32,7 @@ #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/sigcontext.h> +#include <asm/uaccess.h> static struct hard_trap_info { unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ @@ -208,7 +209,14 @@ void arch_kgdb_breakpoint(void) static void kgdb_call_nmi_hook(void *ignored) { + mm_segment_t old_fs; + + old_fs = get_fs(); + set_fs(get_ds()); + kgdb_nmicallback(raw_smp_processor_id(), NULL); + + set_fs(old_fs); } void kgdb_roundup_cpus(unsigned long flags) @@ -282,6 +290,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, struct die_args *args = (struct die_args *)ptr; struct pt_regs *regs = args->regs; int trap = (regs->cp0_cause & 0x7c) >> 2; + mm_segment_t old_fs; #ifdef CONFIG_KPROBES /* @@ -296,11 +305,17 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, if (user_mode(regs)) return NOTIFY_DONE; + /* Kernel mode. 
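Both kgdb entry points in this hunk, the NMI hook above and the exception notifier that follows, wrap the debugger core in the same address-limit dance: save the current limit, widen it to the kernel segment so accesses made on behalf of the remote debugger are not rejected, and restore the saved limit on every exit path. A minimal sketch of the pattern, with the kgdb-specific work elided:

#include <asm/uaccess.h>

static void kgdb_entry_sketch(void)
{
	mm_segment_t old_fs;

	old_fs = get_fs();	/* remember the caller's address limit */
	set_fs(get_ds());	/* switch to the kernel segment */

	/* ... kgdb_nmicallback()/kgdb_handle_exception() run here ... */

	set_fs(old_fs);		/* restore on every return path */
}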
Set correct address limit */ + old_fs = get_fs(); + set_fs(get_ds()); + if (atomic_read(&kgdb_active) != -1) kgdb_nmicallback(smp_processor_id(), regs); - if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) + if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) { + set_fs(old_fs); return NOTIFY_DONE; + } if (atomic_read(&kgdb_setting_breakpoint)) if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst)) @@ -310,6 +325,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, local_irq_enable(); __flush_cache_all(); + set_fs(old_fs); return NOTIFY_STOP; } diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c new file mode 100644 index 000000000000..f76f7a08412d --- /dev/null +++ b/arch/mips/kernel/mips-cm.c @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/errno.h> + +#include <asm/mips-cm.h> +#include <asm/mipsregs.h> + +void __iomem *mips_cm_base; +void __iomem *mips_cm_l2sync_base; + +phys_t __mips_cm_phys_base(void) +{ + u32 config3 = read_c0_config3(); + u32 cmgcr; + + /* Check the CMGCRBase register is implemented */ + if (!(config3 & MIPS_CONF3_CMGCR)) + return 0; + + /* Read the address from CMGCRBase */ + cmgcr = read_c0_cmgcrbase(); + return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32); +} + +phys_t mips_cm_phys_base(void) + __attribute__((weak, alias("__mips_cm_phys_base"))); + +phys_t __mips_cm_l2sync_phys_base(void) +{ + u32 base_reg; + + /* + * If the L2-only sync region is already enabled then leave it at its + * current location.
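mips_cm_phys_base() above is deliberately published as a weak alias of __mips_cm_phys_base(), so a platform whose GCRs cannot be discovered through CMGCRBase can supply its own strong definition. A hypothetical board override (the address is made up) is just:

#include <asm/mips-cm.h>

/* Strong symbol; overrides the weak alias in mips-cm.c at link time. */
phys_t mips_cm_phys_base(void)
{
	return 0x1fbf8000;	/* illustrative fixed GCR base for this board */
}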
+ */ + base_reg = read_gcr_l2_only_sync_base(); + if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK) + return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK; + + /* Default to following the CM */ + return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; +} + +phys_t mips_cm_l2sync_phys_base(void) + __attribute__((weak, alias("__mips_cm_l2sync_phys_base"))); + +static void mips_cm_probe_l2sync(void) +{ + unsigned major_rev; + phys_t addr; + + /* L2-only sync was introduced with CM major revision 6 */ + major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >> + CM_GCR_REV_MAJOR_SHF; + if (major_rev < 6) + return; + + /* Find a location for the L2 sync region */ + addr = mips_cm_l2sync_phys_base(); + BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr); + if (!addr) + return; + + /* Set the region base address & enable it */ + write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK); + + /* Map the region */ + mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE); +} + +int mips_cm_probe(void) +{ + phys_t addr; + u32 base_reg; + + addr = mips_cm_phys_base(); + BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr); + if (!addr) + return -ENODEV; + + mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE); + if (!mips_cm_base) + return -ENXIO; + + /* sanity check that we're looking at a CM */ + base_reg = read_gcr_base(); + if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) { + pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n", + (unsigned long)addr); + mips_cm_base = NULL; + return -ENODEV; + } + + /* set default target to memory */ + base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK; + base_reg |= CM_GCR_BASE_CMDEFTGT_MEM; + write_gcr_base(base_reg); + + /* disable CM regions */ + write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK); + write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); + write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK); + write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); + write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK); + write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); + write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK); + write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK); + + /* probe for an L2-only sync region */ + mips_cm_probe_l2sync(); + + return 0; +} diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c new file mode 100644 index 000000000000..ba473608a347 --- /dev/null +++ b/arch/mips/kernel/mips-cpc.c @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
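mips_cm_probe() above is written so callers may invoke it unconditionally: it fails softly with -ENODEV when no CM exists, and on success it leaves mips_cm_base mapped, which is what the mips_cm_present() helper used later in this patch tests. Typical early platform code therefore looks like the sketch below; the hook name is made up.

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/mips-cm.h>

void __init plat_discover_cm(void)
{
	mips_cm_probe();	/* harmless without a CM: returns -ENODEV */

	if (mips_cm_present())	/* true once mips_cm_base is mapped */
		pr_info("CM GCRs mapped at %p\n", mips_cm_base);
}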
+ */ + +#include <linux/errno.h> +#include <linux/percpu.h> +#include <linux/spinlock.h> + +#include <asm/mips-cm.h> +#include <asm/mips-cpc.h> + +void __iomem *mips_cpc_base; + +static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock); + +static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); + +phys_t __weak mips_cpc_phys_base(void) +{ + u32 cpc_base; + + if (!mips_cm_present()) + return 0; + + if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK)) + return 0; + + /* If the CPC is already enabled, leave it so */ + cpc_base = read_gcr_cpc_base(); + if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK) + return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK; + + /* Otherwise, give it the default address & enable it */ + cpc_base = mips_cpc_default_phys_base(); + write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK); + return cpc_base; +} + +int mips_cpc_probe(void) +{ + phys_t addr; + unsigned cpu; + + for_each_possible_cpu(cpu) + spin_lock_init(&per_cpu(cpc_core_lock, cpu)); + + addr = mips_cpc_phys_base(); + if (!addr) + return -ENODEV; + + mips_cpc_base = ioremap_nocache(addr, 0x8000); + if (!mips_cpc_base) + return -ENXIO; + + return 0; +} + +void mips_cpc_lock_other(unsigned int core) +{ + unsigned curr_core; + preempt_disable(); + curr_core = current_cpu_data.core; + spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), + per_cpu(cpc_core_lock_flags, curr_core)); + write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF); +} + +void mips_cpc_unlock_other(void) +{ + unsigned curr_core = current_cpu_data.core; + spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), + per_cpu(cpc_core_lock_flags, curr_core)); + preempt_enable(); +} diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index cb098628aee8..362bb3707e62 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -1,5 +1,5 @@ /* - * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels + * General MIPS MT support routines, usable in AP/SP and SMVP. * Copyright (C) 2005 Mips Technologies, Inc */ #include <linux/cpu.h> diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 6ded9bd1489c..88b1ef5f868a 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -1,5 +1,5 @@ /* - * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels + * General MIPS MT support routines, usable in AP/SP and SMVP. 
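The lock/unlock pair above exists because the CPC's "other core" register window is a single piece of per-core state shared by all VPEs of a core, hence the preempt_disable() and the per-core IRQ-saving spinlock. A caller drives another core's registers strictly inside that bracket; the sketch below assumes the generated core-other accessor name write_cpc_co_cmd() and the CPC_Cx_CMD_PWRUP command value from asm/mips-cpc.h.

#include <asm/mips-cpc.h>

static void core_power_up(unsigned int core)
{
	mips_cpc_lock_other(core);		/* point the window at "core" */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);	/* command applies to that core */
	mips_cpc_unlock_other();		/* drop lock, re-enable preemption */
}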
* Copyright (C) 2005 Mips Technologies, Inc */ @@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl) int tc; unsigned long haltval; unsigned long tcstatval; -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_soft_dump(void); -#endif /* CONFIG_MIPT_MT_SMTC */ local_irq_save(flags); vpflags = dvpe(); @@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl) if (!haltval) write_tc_c0_tchalt(0); } -#ifdef CONFIG_MIPS_MT_SMTC - smtc_soft_dump(); -#endif /* CONFIG_MIPT_MT_SMTC */ printk("===========================\n"); evpe(vpflags); local_irq_restore(flags); @@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void) void mt_cflush_lockdown(void) { -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_cflush_lockdown(void); - - smtc_cflush_lockdown(); -#endif /* CONFIG_MIPS_MT_SMTC */ /* FILL IN VSMP and AP/SP VERSIONS HERE */ } void mt_cflush_release(void) { -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_cflush_release(void); - - smtc_cflush_release(); -#endif /* CONFIG_MIPS_MT_SMTC */ /* FILL IN VSMP and AP/SP VERSIONS HERE */ } diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 6e58e97fcd39..2607c3a4ff7e 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -16,12 +16,20 @@ #include <asm/ftrace.h> extern void *__bzero(void *__s, size_t __count); +extern long __strncpy_from_kernel_nocheck_asm(char *__to, + const char *__from, long __len); +extern long __strncpy_from_kernel_asm(char *__to, const char *__from, + long __len); extern long __strncpy_from_user_nocheck_asm(char *__to, const char *__from, long __len); extern long __strncpy_from_user_asm(char *__to, const char *__from, long __len); +extern long __strlen_kernel_nocheck_asm(const char *s); +extern long __strlen_kernel_asm(const char *s); extern long __strlen_user_nocheck_asm(const char *s); extern long __strlen_user_asm(const char *s); +extern long __strnlen_kernel_nocheck_asm(const char *s); +extern long __strnlen_kernel_asm(const char *s); extern long __strnlen_user_nocheck_asm(const char *s); extern long __strnlen_user_asm(const char *s); @@ -43,17 +51,31 @@ EXPORT_SYMBOL(copy_page); */ EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__copy_user_inatomic); +#ifdef CONFIG_EVA +EXPORT_SYMBOL(__copy_from_user_eva); +EXPORT_SYMBOL(__copy_in_user_eva); +EXPORT_SYMBOL(__copy_to_user_eva); +EXPORT_SYMBOL(__copy_user_inatomic_eva); +#endif EXPORT_SYMBOL(__bzero); +EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm); +EXPORT_SYMBOL(__strncpy_from_kernel_asm); EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); EXPORT_SYMBOL(__strncpy_from_user_asm); +EXPORT_SYMBOL(__strlen_kernel_nocheck_asm); +EXPORT_SYMBOL(__strlen_kernel_asm); EXPORT_SYMBOL(__strlen_user_nocheck_asm); EXPORT_SYMBOL(__strlen_user_asm); +EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm); +EXPORT_SYMBOL(__strnlen_kernel_asm); EXPORT_SYMBOL(__strnlen_user_nocheck_asm); EXPORT_SYMBOL(__strnlen_user_asm); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_nocheck); -EXPORT_SYMBOL(__csum_partial_copy_user); +EXPORT_SYMBOL(__csum_partial_copy_kernel); +EXPORT_SYMBOL(__csum_partial_copy_to_user); +EXPORT_SYMBOL(__csum_partial_copy_from_user); EXPORT_SYMBOL(invalid_pte_table); #ifdef CONFIG_FUNCTION_TRACER diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 029e002a4ea0..f6547680c81c 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -10,24 +10,12 @@ * Copyright (C) 2000 MIPS Technologies, Inc. 
* written by Carsten Langgaard, carstenl@mips.com */ -#include <asm/asm.h> -#include <asm/cachectl.h> -#include <asm/fpregdef.h> -#include <asm/mipsregs.h> -#include <asm/asm-offsets.h> -#include <asm/pgtable-bits.h> -#include <asm/regdef.h> -#include <asm/stackframe.h> -#include <asm/thread_info.h> - -#include <asm/asmmacro.h> - -/* - * Offset to the current process status flags, the first 32 bytes of the - * stack are not used. - */ -#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) +#define USE_ALTERNATE_RESUME_IMPL 1 + .set push + .set arch=mips64r2 +#include "r4k_switch.S" + .set pop /* * task_struct *resume(task_struct *prev, task_struct *next, * struct thread_info *next_ti, int usedfpu) @@ -40,6 +28,61 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) + /* + * check if we need to save FPU registers + */ + PTR_L t3, TASK_THREAD_INFO(a0) + LONG_L t0, TI_FLAGS(t3) + li t1, _TIF_USEDFPU + and t2, t0, t1 + beqz t2, 1f + nor t1, zero, t1 + + and t0, t0, t1 + LONG_S t0, TI_FLAGS(t3) + + /* + * clear saved user stack CU1 bit + */ + LONG_L t0, ST_OFF(t3) + li t1, ~ST0_CU1 + and t0, t0, t1 + LONG_S t0, ST_OFF(t3) + + .set push + .set arch=mips64r2 + fpu_save_double a0 t0 t1 # c0_status passed in t0 + # clobbers t1 + .set pop +1: + + /* check if we need to save COP2 registers */ + PTR_L t2, TASK_THREAD_INFO(a0) + LONG_L t0, ST_OFF(t2) + bbit0 t0, 30, 1f + + /* Disable COP2 in the stored process state */ + li t1, ST0_CU2 + xor t0, t1 + LONG_S t0, ST_OFF(t2) + + /* Enable COP2 so we can save it */ + mfc0 t0, CP0_STATUS + or t0, t1 + mtc0 t0, CP0_STATUS + + /* Save COP2 */ + daddu a0, THREAD_CP2 + jal octeon_cop2_save + dsubu a0, THREAD_CP2 + + /* Disable COP2 now that we are done */ + mfc0 t0, CP0_STATUS + li t1, ST0_CU2 + xor t0, t1 + mtc0 t0, CP0_STATUS + +1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ mfc0 t0, $11,7 /* CvmMemCtl */ @@ -85,12 +128,7 @@ move $28, a2 cpu_restore_nonscratch a1 -#if (_THREAD_SIZE - 32) < 0x8000 - PTR_ADDIU t0, $28, _THREAD_SIZE - 32 -#else - PTR_LI t0, _THREAD_SIZE - 32 - PTR_ADDU t0, $28 -#endif + PTR_ADDU t0, $28, _THREAD_SIZE - 32 set_saved_sp t0, t1, t2 mfc0 t1, CP0_STATUS /* Do we really need this? */ diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 24cdf64789c3..4f2d9dece7ab 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -805,7 +805,7 @@ static void reset_counters(void *arg) } } -/* 24K/34K/1004K cores can share the same event map. */ +/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */ static const struct mips_perf_event mipsxxcore_event_map [PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, @@ -814,8 +814,8 @@ static const struct mips_perf_event mipsxxcore_event_map [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, }; -/* 74K core has different branch event code. */ -static const struct mips_perf_event mipsxx74Kcore_event_map +/* 74K/proAptiv core has different branch event code. */ +static const struct mips_perf_event mipsxxcore_event_map2 [PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, @@ -849,7 +849,7 @@ static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */ }; -/* 24K/34K/1004K cores can share the same cache event map. 
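The COP2 block added to octeon_switch.S above is the usual lazy-coprocessor save: act only if the saved Status word has CU2 set, clear CU2 there so the next user access re-faults, briefly enable COP2 in the live Status to dump its state, then disable it again. Its control flow transcribed into C, where the saved-Status pointer stands in for the ST_OFF slot on the kernel stack:

#include <linux/sched.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>

extern void octeon_cop2_save(struct octeon_cop2_state *state);

static void octeon_save_cop2(struct task_struct *prev,
			     unsigned long *saved_status)
{
	if (!(*saved_status & ST0_CU2))
		return;				/* task never used COP2 */

	*saved_status &= ~ST0_CU2;		/* force a re-fault on next use */

	write_c0_status(read_c0_status() | ST0_CU2);	/* enable COP2 */
	octeon_cop2_save(&prev->thread.cp2);		/* dump its registers */
	write_c0_status(read_c0_status() & ~ST0_CU2);	/* disable it again */
}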
*/ +/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */ static const struct mips_perf_event mipsxxcore_cache_map [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] @@ -930,8 +930,8 @@ static const struct mips_perf_event mipsxxcore_cache_map }, }; -/* 74K core has completely different cache event map. */ -static const struct mips_perf_event mipsxx74Kcore_cache_map +/* 74K/proAptiv core has completely different cache event map. */ +static const struct mips_perf_event mipsxxcore_cache_map2 [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -978,6 +978,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P }, }, }, +/* + * 74K core does not have specific DTLB events. proAptiv core has + * "speculative" DTLB events which are numbered 0x63 (even/odd) and + * not included here. One can use raw events if really needed. + */ [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, @@ -1378,6 +1383,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) #define IS_BOTH_COUNTERS_74K_EVENT(b) \ ((b) == 0 || (b) == 1) +/* proAptiv */ +#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \ + ((b) == 0 || (b) == 1) + /* 1004K */ #define IS_BOTH_COUNTERS_1004K_EVENT(b) \ ((b) == 0 || (b) == 1 || (b) == 11) @@ -1391,6 +1400,20 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) #define IS_RANGE_V_1004K_EVENT(r) ((r) == 47) #endif +/* interAptiv */ +#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \ + ((b) == 0 || (b) == 1 || (b) == 11) +#ifdef CONFIG_MIPS_MT_SMP +/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */ +#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \ + ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \ + (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \ + (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \ + (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \ + ((b) >= 64 && (b) <= 67)) +#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175) +#endif + /* BMIPS5000 */ #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \ ((b) == 0 || (b) == 1) @@ -1442,6 +1465,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) #endif break; case CPU_74K: + case CPU_1074K: if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; else @@ -1451,6 +1475,16 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) raw_event.range = P; #endif break; + case CPU_PROAPTIV: + if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + raw_event.range = P; +#endif + break; case CPU_1004K: if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; @@ -1466,6 +1500,21 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) raw_event.range = T; #endif break; + case CPU_INTERAPTIV: + if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id)) + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; + else + raw_event.cntr_mask = + raw_id > 127 ? 
CNTR_ODD : CNTR_EVEN; +#ifdef CONFIG_MIPS_MT_SMP + if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id)) + raw_event.range = P; + else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id))) + raw_event.range = V; + else + raw_event.range = T; +#endif + break; case CPU_BMIPS5000: if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id)) raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; @@ -1576,14 +1625,29 @@ init_hw_perf_events(void) break; case CPU_74K: mipspmu.name = "mips/74K"; - mipspmu.general_event_map = &mipsxx74Kcore_event_map; - mipspmu.cache_event_map = &mipsxx74Kcore_cache_map; + mipspmu.general_event_map = &mipsxxcore_event_map2; + mipspmu.cache_event_map = &mipsxxcore_cache_map2; + break; + case CPU_PROAPTIV: + mipspmu.name = "mips/proAptiv"; + mipspmu.general_event_map = &mipsxxcore_event_map2; + mipspmu.cache_event_map = &mipsxxcore_cache_map2; break; case CPU_1004K: mipspmu.name = "mips/1004K"; mipspmu.general_event_map = &mipsxxcore_event_map; mipspmu.cache_event_map = &mipsxxcore_cache_map; break; + case CPU_1074K: + mipspmu.name = "mips/1074K"; + mipspmu.general_event_map = &mipsxxcore_event_map; + mipspmu.cache_event_map = &mipsxxcore_cache_map; + break; + case CPU_INTERAPTIV: + mipspmu.name = "mips/interAptiv"; + mipspmu.general_event_map = &mipsxxcore_event_map; + mipspmu.cache_event_map = &mipsxxcore_cache_map; + break; case CPU_LOONGSON1: mipspmu.name = "mips/loongson1"; mipspmu.general_event_map = &mipsxxcore_event_map; diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c new file mode 100644 index 000000000000..c4c2069d3a20 --- /dev/null +++ b/arch/mips/kernel/pm-cps.c @@ -0,0 +1,716 @@ +/* + * Copyright (C) 2014 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/init.h> +#include <linux/percpu.h> +#include <linux/slab.h> + +#include <asm/asm-offsets.h> +#include <asm/cacheflush.h> +#include <asm/cacheops.h> +#include <asm/idle.h> +#include <asm/mips-cm.h> +#include <asm/mips-cpc.h> +#include <asm/mipsmtregs.h> +#include <asm/pm.h> +#include <asm/pm-cps.h> +#include <asm/smp-cps.h> +#include <asm/uasm.h> + +/* + * cps_nc_entry_fn - type of a generated non-coherent state entry function + * @online: the count of online coupled VPEs + * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count + * + * The code entering & exiting non-coherent states is generated at runtime + * using uasm, in order to ensure that the compiler cannot insert a stray + * memory access at an unfortunate time and to allow the generation of optimal + * core-specific code particularly for cache routines. If coupled_coherence + * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state, + * returns the number of VPEs that were in the wait state at the point this + * VPE left it. Returns garbage if coupled_coherence is zero or this is not + * the entry function for CPS_PM_NC_WAIT. + */ +typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count); + +/* + * The entry point of the generated non-coherent idle state entry/exit + * functions. Actually per-core rather than per-CPU. 
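All of the new cases above share one convention for user-supplied raw events: the low seven bits (base_id = raw_id & 0x7f) name the event, and bit 7 selects the odd-counter variant unless the event is one the core can schedule on either parity. Pulled out of the switch, the decode is as follows; both_counters stands in for the per-core IS_BOTH_COUNTERS_*() macros, and the mask values are placeholders so the sketch is self-contained.

#include <linux/types.h>

#define CNTR_EVEN	0x1	/* placeholders; the real masks live in the driver */
#define CNTR_ODD	0x2

static unsigned int decode_cntr_mask(u64 config, bool both_counters)
{
	unsigned int raw_id = config & 0xff;

	if (both_counters)
		return CNTR_EVEN | CNTR_ODD;		/* either counter works */

	return raw_id > 127 ? CNTR_ODD : CNTR_EVEN;	/* bit 7 = parity */
}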
+ */ +static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], + nc_asm_enter); + +/* Bitmap indicating which states are supported by the system */ +DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); + +/* + * Indicates the number of coupled VPEs ready to operate in a non-coherent + * state. Actually per-core rather than per-CPU. + */ +static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); +static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); + +/* Indicates online CPUs coupled with the current CPU */ +static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); + +/* + * Used to synchronize entry to deep idle states. Actually per-core rather + * than per-CPU. + */ +static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); + +/* Saved CPU state across the CPS_PM_POWER_GATED state */ +DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); + +/* A somewhat arbitrary number of labels & relocs for uasm */ +static struct uasm_label labels[32] __initdata; +static struct uasm_reloc relocs[32] __initdata; + +/* CPU dependant sync types */ +static unsigned stype_intervention; +static unsigned stype_memory; +static unsigned stype_ordering; + +enum mips_reg { + zero, at, v0, v1, a0, a1, a2, a3, + t0, t1, t2, t3, t4, t5, t6, t7, + s0, s1, s2, s3, s4, s5, s6, s7, + t8, t9, k0, k1, gp, sp, fp, ra, +}; + +bool cps_pm_support_state(enum cps_pm_state state) +{ + return test_bit(state, state_support); +} + +static void coupled_barrier(atomic_t *a, unsigned online) +{ + /* + * This function is effectively the same as + * cpuidle_coupled_parallel_barrier, which can't be used here since + * there's no cpuidle device. + */ + + if (!coupled_coherence) + return; + + smp_mb__before_atomic(); + atomic_inc(a); + + while (atomic_read(a) < online) + cpu_relax(); + + if (atomic_inc_return(a) == online * 2) { + atomic_set(a, 0); + return; + } + + while (atomic_read(a) > online) + cpu_relax(); +} + +int cps_pm_enter_state(enum cps_pm_state state) +{ + unsigned cpu = smp_processor_id(); + unsigned core = current_cpu_data.core; + unsigned online, left; + cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); + u32 *core_ready_count, *nc_core_ready_count; + void *nc_addr; + cps_nc_entry_fn entry; + struct core_boot_config *core_cfg; + struct vpe_boot_config *vpe_cfg; + + /* Check that there is an entry function for this state */ + entry = per_cpu(nc_asm_enter, core)[state]; + if (!entry) + return -EINVAL; + + /* Calculate which coupled CPUs (VPEs) are online */ +#ifdef CONFIG_MIPS_MT + if (cpu_online(cpu)) { + cpumask_and(coupled_mask, cpu_online_mask, + &cpu_sibling_map[cpu]); + online = cpumask_weight(coupled_mask); + cpumask_clear_cpu(cpu, coupled_mask); + } else +#endif + { + cpumask_clear(coupled_mask); + online = 1; + } + + /* Setup the VPE to run mips_cps_pm_restore when started again */ + if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { + core_cfg = &mips_cps_core_bootcfg[core]; + vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id]; + vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; + vpe_cfg->gp = (unsigned long)current_thread_info(); + vpe_cfg->sp = 0; + } + + /* Indicate that this CPU might not be coherent */ + cpumask_clear_cpu(cpu, &cpu_coherent_mask); + smp_mb__after_atomic(); + + /* Create a non-coherent mapping of the core ready_count */ + core_ready_count = per_cpu(ready_count, core); + nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), + (unsigned long)core_ready_count); + nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); + 
nc_core_ready_count = nc_addr; + + /* Ensure ready_count is zero-initialised before the assembly runs */ + ACCESS_ONCE(*nc_core_ready_count) = 0; + coupled_barrier(&per_cpu(pm_barrier, core), online); + + /* Run the generated entry code */ + left = entry(online, nc_core_ready_count); + + /* Remove the non-coherent mapping of ready_count */ + kunmap_noncoherent(); + + /* Indicate that this CPU is definitely coherent */ + cpumask_set_cpu(cpu, &cpu_coherent_mask); + + /* + * If this VPE is the first to leave the non-coherent wait state then + * it needs to wake up any coupled VPEs still running their wait + * instruction so that they return to cpuidle, which can then complete + * coordination between the coupled VPEs & provide the governor with + * a chance to reflect on the length of time the VPEs were in the + * idle state. + */ + if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online)) + arch_send_call_function_ipi_mask(coupled_mask); + + return 0; +} + +static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + const struct cache_desc *cache, + unsigned op, int lbl) +{ + unsigned cache_size = cache->ways << cache->waybit; + unsigned i; + const unsigned unroll_lines = 32; + + /* If the cache isn't present this function has it easy */ + if (cache->flags & MIPS_CACHE_NOT_PRESENT) + return; + + /* Load base address */ + UASM_i_LA(pp, t0, (long)CKSEG0); + + /* Calculate end address */ + if (cache_size < 0x8000) + uasm_i_addiu(pp, t1, t0, cache_size); + else + UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); + + /* Start of cache op loop */ + uasm_build_label(pl, *pp, lbl); + + /* Generate the cache ops */ + for (i = 0; i < unroll_lines; i++) + uasm_i_cache(pp, op, i * cache->linesz, t0); + + /* Update the base address */ + uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); + + /* Loop if we haven't reached the end address yet */ + uasm_il_bne(pp, pr, t0, t1, lbl); + uasm_i_nop(pp); +} + +static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + const struct cpuinfo_mips *cpu_info, + int lbl) +{ + unsigned i, fsb_size = 8; + unsigned num_loads = (fsb_size * 3) / 2; + unsigned line_stride = 2; + unsigned line_size = cpu_info->dcache.linesz; + unsigned perf_counter, perf_event; + unsigned revision = cpu_info->processor_id & PRID_REV_MASK; + + /* + * Determine whether this CPU requires an FSB flush, and if so which + * performance counter/event reflect stalls due to a full FSB. + */ + switch (__get_cpu_type(cpu_info->cputype)) { + case CPU_INTERAPTIV: + perf_counter = 1; + perf_event = 51; + break; + + case CPU_PROAPTIV: + /* Newer proAptiv cores don't require this workaround */ + if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) + return 0; + + /* On older ones it's unavailable */ + return -1; + + /* CPUs which do not require the workaround */ + case CPU_P5600: + return 0; + + default: + WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n"); + return -1; + } + + /* + * Ensure that the fill/store buffer (FSB) is not holding the results + * of a prefetch, since if it is then the CPC sequencer may become + * stuck in the D3 (ClrBus) state whilst entering a low power state. 
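cps_gen_cache_routine() above emits an unrolled version of a very simple loop: walk the cache by index from CKSEG0 to CKSEG0 plus (ways << waybit), issuing one cache op per line. Without the 32-line unrolling, the generated code corresponds to this C sketch, using the cache_op() wrapper from asm/r4kcache.h around the MIPS cache instruction:

#include <asm/addrspace.h>
#include <asm/cpu-info.h>
#include <asm/r4kcache.h>

static void index_cache_op(unsigned int op, const struct cache_desc *cache)
{
	unsigned long addr = CKSEG0;
	unsigned long end = addr + (cache->ways << cache->waybit);

	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;				/* nothing to operate on */

	for (; addr < end; addr += cache->linesz)
		cache_op(op, addr);		/* one op per line, by index */
}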
+ */ + + /* Preserve perf counter setup */ + uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + + /* Setup perf counter to count FSB full pipeline stalls */ + uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); + uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_ehb(pp); + uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_ehb(pp); + + /* Base address for loads */ + UASM_i_LA(pp, t0, (long)CKSEG0); + + /* Start of clear loop */ + uasm_build_label(pl, *pp, lbl); + + /* Perform some loads to fill the FSB */ + for (i = 0; i < num_loads; i++) + uasm_i_lw(pp, zero, i * line_size * line_stride, t0); + + /* + * Invalidate the new D-cache entries so that the cache will need + * refilling (via the FSB) if the loop is executed again. + */ + for (i = 0; i < num_loads; i++) { + uasm_i_cache(pp, Hit_Invalidate_D, + i * line_size * line_stride, t0); + uasm_i_cache(pp, Hit_Writeback_Inv_SD, + i * line_size * line_stride, t0); + } + + /* Completion barrier */ + uasm_i_sync(pp, stype_memory); + uasm_i_ehb(pp); + + /* Check whether the pipeline stalled due to the FSB being full */ + uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ + + /* Loop if it didn't */ + uasm_il_beqz(pp, pr, t1, lbl); + uasm_i_nop(pp); + + /* Restore perf counter 1. The count may well now be wrong... */ + uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_ehb(pp); + uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_ehb(pp); + + return 0; +} + +static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, + struct uasm_reloc **pr, + unsigned r_addr, int lbl) +{ + uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); + uasm_build_label(pl, *pp, lbl); + uasm_i_ll(pp, t1, 0, r_addr); + uasm_i_or(pp, t1, t1, t0); + uasm_i_sc(pp, t1, 0, r_addr); + uasm_il_beqz(pp, pr, t1, lbl); + uasm_i_nop(pp); +} + +static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) +{ + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + u32 *buf, *p; + const unsigned r_online = a0; + const unsigned r_nc_count = a1; + const unsigned r_pcohctl = t7; + const unsigned max_instrs = 256; + unsigned cpc_cmd; + int err; + enum { + lbl_incready = 1, + lbl_poll_cont, + lbl_secondary_hang, + lbl_disable_coherence, + lbl_flush_fsb, + lbl_invicache, + lbl_flushdcache, + lbl_hang, + lbl_set_cont, + lbl_secondary_cont, + lbl_decready, + }; + + /* Allocate a buffer to hold the generated code */ + p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL); + if (!buf) + return NULL; + + /* Clear labels & relocs ready for (re)use */ + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); + + if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { + /* + * Save CPU state. Note the non-standard calling convention + * with the return address placed in v0 to avoid clobbering + * the ra register before it is saved. + */ + UASM_i_LA(&p, t0, (long)mips_cps_pm_save); + uasm_i_jalr(&p, v0, t0); + uasm_i_nop(&p); + } + + /* + * Load addresses of required CM & CPC registers. This is done early + * because they're needed in both the enable & disable coherence steps + * but in the coupled case the enable step will only run on one VPE. 
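Stripped of its uasm plumbing, the FSB-flush sequence generated above implements this algorithm: program a performance counter to count "FSB full" pipeline stalls, issue enough loads to occupy the fill/store buffer (displacing any prefetch results), invalidate the lines so the next pass refills through the FSB again, and repeat until a pass actually observes a stall, which proves the buffer was filled by these loads. A C rendition for explanation only; the helper names are illustrative and the real sequence must run from the generated non-coherent code, not from C.

#include <linux/types.h>

/* Illustrative stand-ins for the generated mtc0/mfc0 and cache-op code. */
extern void fsb_counter_setup(void);
extern u32 fsb_counter_read(void);
extern void dcache_line_invalidate(unsigned long addr);

static void flush_fsb_sketch(unsigned long base, unsigned int line_size)
{
	const unsigned int num_loads = (8 * 3) / 2;	/* 1.5x the FSB depth */
	const unsigned int stride = 2 * line_size;	/* two lines apart, as generated */
	unsigned int i;

	fsb_counter_setup();	/* count "FSB full" stalls, starting from 0 */

	do {
		for (i = 0; i < num_loads; i++)		/* loads fill the FSB */
			(void)*(volatile u32 *)(base + i * stride);

		for (i = 0; i < num_loads; i++)		/* next pass must refill */
			dcache_line_invalidate(base + i * stride);
	} while (fsb_counter_read() == 0);	/* done once a stall was seen */
}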
+ */ + UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence()); + + if (coupled_coherence) { + /* Increment ready_count */ + uasm_i_sync(&p, stype_ordering); + uasm_build_label(&l, p, lbl_incready); + uasm_i_ll(&p, t1, 0, r_nc_count); + uasm_i_addiu(&p, t2, t1, 1); + uasm_i_sc(&p, t2, 0, r_nc_count); + uasm_il_beqz(&p, &r, t2, lbl_incready); + uasm_i_addiu(&p, t1, t1, 1); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + + /* + * If this is the last VPE to become ready for non-coherence + * then it should branch below. + */ + uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); + uasm_i_nop(&p); + + if (state < CPS_PM_POWER_GATED) { + /* + * Otherwise this is not the last VPE to become ready + * for non-coherence. It needs to wait until coherence + * has been disabled before proceeding, which it will do + * by polling for the top bit of ready_count being set. + */ + uasm_i_addiu(&p, t1, zero, -1); + uasm_build_label(&l, p, lbl_poll_cont); + uasm_i_lw(&p, t0, 0, r_nc_count); + uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); + uasm_i_ehb(&p); + uasm_i_yield(&p, zero, t1); + uasm_il_b(&p, &r, lbl_poll_cont); + uasm_i_nop(&p); + } else { + /* + * The core will lose power & this VPE will not continue + * so it can simply halt here. + */ + uasm_i_addiu(&p, t0, zero, TCHALT_H); + uasm_i_mtc0(&p, t0, 2, 4); + uasm_build_label(&l, p, lbl_secondary_hang); + uasm_il_b(&p, &r, lbl_secondary_hang); + uasm_i_nop(&p); + } + } + + /* + * This is the point of no return - this VPE will now proceed to + * disable coherence. At this point we *must* be sure that no other + * VPE within the core will interfere with the L1 dcache. + */ + uasm_build_label(&l, p, lbl_disable_coherence); + + /* Invalidate the L1 icache */ + cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, + Index_Invalidate_I, lbl_invicache); + + /* Writeback & invalidate the L1 dcache */ + cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, + Index_Writeback_Inv_D, lbl_flushdcache); + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + + /* + * Disable all but self interventions. The load from COHCTL is defined + * by the interAptiv & proAptiv SUMs as ensuring that the operation + * resulting from the preceeding store is complete. + */ + uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); + uasm_i_sw(&p, t0, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + /* Sync to ensure previous interventions are complete */ + uasm_i_sync(&p, stype_intervention); + uasm_i_ehb(&p); + + /* Disable coherence */ + uasm_i_sw(&p, zero, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + if (state >= CPS_PM_CLOCK_GATED) { + err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], + lbl_flush_fsb); + if (err) + goto out_err; + + /* Determine the CPC command to issue */ + switch (state) { + case CPS_PM_CLOCK_GATED: + cpc_cmd = CPC_Cx_CMD_CLOCKOFF; + break; + case CPS_PM_POWER_GATED: + cpc_cmd = CPC_Cx_CMD_PWRDOWN; + break; + default: + BUG(); + goto out_err; + } + + /* Issue the CPC command */ + UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); + uasm_i_addiu(&p, t1, zero, cpc_cmd); + uasm_i_sw(&p, t1, 0, t0); + + if (state == CPS_PM_POWER_GATED) { + /* If anything goes wrong just hang */ + uasm_build_label(&l, p, lbl_hang); + uasm_il_b(&p, &r, lbl_hang); + uasm_i_nop(&p); + + /* + * There's no point generating more code, the core is + * powered down & if powered back up will run from the + * reset vector not from here. 
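The lbl_incready sequence generated earlier in this function is the standard MIPS load-linked/store-conditional retry loop; written directly in C with inline assembly it looks like the sketch below, returning the post-increment value just as the generated code leaves it in t1 + 1. The real code has to be generated because ready_count is only reachable through the non-coherent mapping set up beforehand, so this is illustration, not a drop-in replacement.

#include <linux/types.h>

static inline unsigned int ll_sc_increment(u32 *p)
{
	unsigned int old, tmp;

	__asm__ __volatile__(
	"1:	ll	%0, %2		\n"	/* old = *p, link the address */
	"	addiu	%1, %0, 1	\n"	/* tmp = old + 1 */
	"	sc	%1, %2		\n"	/* try the store; tmp = success? */
	"	beqz	%1, 1b		\n"	/* lost the link: retry */
	"	 nop			\n"
	: "=&r" (old), "=&r" (tmp), "+m" (*p));

	return old + 1;
}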
+ */ + goto gen_done; + } + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + } + + if (state == CPS_PM_NC_WAIT) { + /* + * At this point it is safe for all VPEs to proceed with + * execution. This VPE will set the top bit of ready_count + * to indicate to the other VPEs that they may continue. + */ + if (coupled_coherence) + cps_gen_set_top_bit(&p, &l, &r, r_nc_count, + lbl_set_cont); + + /* + * VPEs which did not disable coherence will continue + * executing, after coherence has been disabled, from this + * point. + */ + uasm_build_label(&l, p, lbl_secondary_cont); + + /* Now perform our wait */ + uasm_i_wait(&p, 0); + } + + /* + * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs + * will run this. The first will actually re-enable coherence & the + * rest will just be performing a rather unusual nop. + */ + uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK); + uasm_i_sw(&p, t0, 0, r_pcohctl); + uasm_i_lw(&p, t0, 0, r_pcohctl); + + /* Completion barrier */ + uasm_i_sync(&p, stype_memory); + uasm_i_ehb(&p); + + if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { + /* Decrement ready_count */ + uasm_build_label(&l, p, lbl_decready); + uasm_i_sync(&p, stype_ordering); + uasm_i_ll(&p, t1, 0, r_nc_count); + uasm_i_addiu(&p, t2, t1, -1); + uasm_i_sc(&p, t2, 0, r_nc_count); + uasm_il_beqz(&p, &r, t2, lbl_decready); + uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + } + + if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { + /* + * At this point it is safe for all VPEs to proceed with + * execution. This VPE will set the top bit of ready_count + * to indicate to the other VPEs that they may continue. + */ + cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); + + /* + * This core will be reliant upon another core sending a + * power-up command to the CPC in order to resume operation. + * Thus an arbitrary VPE can't trigger the core leaving the + * idle state and the one that disables coherence might as well + * be the one to re-enable it. The rest will continue from here + * after that has been done. 
+ */ + uasm_build_label(&l, p, lbl_secondary_cont); + + /* Ordering barrier */ + uasm_i_sync(&p, stype_ordering); + } + + /* The core is coherent, time to return to C code */ + uasm_i_jr(&p, ra); + uasm_i_nop(&p); + +gen_done: + /* Ensure the code didn't exceed the resources allocated for it */ + BUG_ON((p - buf) > max_instrs); + BUG_ON((l - labels) > ARRAY_SIZE(labels)); + BUG_ON((r - relocs) > ARRAY_SIZE(relocs)); + + /* Patch branch offsets */ + uasm_resolve_relocs(relocs, labels); + + /* Flush the icache */ + local_flush_icache_range((unsigned long)buf, (unsigned long)p); + + return buf; +out_err: + kfree(buf); + return NULL; +} + +static int __init cps_gen_core_entries(unsigned cpu) +{ + enum cps_pm_state state; + unsigned core = cpu_data[cpu].core; + unsigned dlinesz = cpu_data[cpu].dcache.linesz; + void *entry_fn, *core_rc; + + for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { + if (per_cpu(nc_asm_enter, core)[state]) + continue; + if (!test_bit(state, state_support)) + continue; + + entry_fn = cps_gen_entry_code(cpu, state); + if (!entry_fn) { + pr_err("Failed to generate core %u state %u entry\n", + core, state); + clear_bit(state, state_support); + } + + per_cpu(nc_asm_enter, core)[state] = entry_fn; + } + + if (!per_cpu(ready_count, core)) { + core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); + if (!core_rc) { + pr_err("Failed allocate core %u ready_count\n", core); + return -ENOMEM; + } + per_cpu(ready_count_alloc, core) = core_rc; + + /* Ensure ready_count is aligned to a cacheline boundary */ + core_rc += dlinesz - 1; + core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); + per_cpu(ready_count, core) = core_rc; + } + + return 0; +} + +static int __init cps_pm_init(void) +{ + unsigned cpu; + int err; + + /* Detect appropriate sync types for the system */ + switch (current_cpu_data.cputype) { + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + case CPU_M5150: + case CPU_P5600: + stype_intervention = 0x2; + stype_memory = 0x3; + stype_ordering = 0x10; + break; + + default: + pr_warn("Power management is using heavyweight sync 0\n"); + } + + /* A CM is required for all non-coherent states */ + if (!mips_cm_present()) { + pr_warn("pm-cps: no CM, non-coherent states unavailable\n"); + goto out; + } + + /* + * If interrupts were enabled whilst running a wait instruction on a + * non-coherent core then the VPE may end up processing interrupts + * whilst non-coherent. That would be bad. + */ + if (cpu_wait == r4k_wait_irqoff) + set_bit(CPS_PM_NC_WAIT, state_support); + else + pr_warn("pm-cps: non-coherent wait unavailable\n"); + + /* Detect whether a CPC is present */ + if (mips_cpc_present()) { + /* Detect whether clock gating is implemented */ + if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK) + set_bit(CPS_PM_CLOCK_GATED, state_support); + else + pr_warn("pm-cps: CPC does not support clock gating\n"); + + /* Power gating is available with CPS SMP & any CPC */ + if (mips_cps_smp_in_use()) + set_bit(CPS_PM_POWER_GATED, state_support); + else + pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n"); + } else { + pr_warn("pm-cps: no CPC, clock & power gating unavailable\n"); + } + + for_each_present_cpu(cpu) { + err = cps_gen_core_entries(cpu); + if (err) + return err; + } +out: + return 0; +} +arch_initcall(cps_pm_init); diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c new file mode 100644 index 000000000000..fefdf39d3df3 --- /dev/null +++ b/arch/mips/kernel/pm.c @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2014 Imagination Technologies Ltd. 
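The ready_count allocation in cps_gen_core_entries() above over-allocates by a full D-cache line and rounds the pointer up so the counter can never share a line with unrelated data, which matters once that line is written through a non-coherent mapping; the raw pointer is kept in ready_count_alloc separately so it can still be freed. The open-coded rounding is the standard align-up idiom, equivalent to PTR_ALIGN() from linux/kernel.h:

#include <linux/kernel.h>
#include <linux/slab.h>

static u32 *alloc_line_isolated_u32(size_t linesz, void **raw)
{
	void *p = kmalloc(linesz * 2, GFP_KERNEL);	/* room to round up */

	if (!p)
		return NULL;

	*raw = p;			/* keep for kfree(), like ready_count_alloc */
	return PTR_ALIGN(p, linesz);	/* same math as "+ linesz - 1, & ~(linesz - 1)" */
}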
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * CPU PM notifiers for saving/restoring general CPU state. + */ + +#include <linux/cpu_pm.h> +#include <linux/init.h> + +#include <asm/dsp.h> +#include <asm/fpu.h> +#include <asm/mmu_context.h> +#include <asm/pm.h> +#include <asm/watch.h> + +/* Used by PM helper macros in asm/pm.h */ +struct mips_static_suspend_state mips_static_suspend_state; + +/** + * mips_cpu_save() - Save general CPU state. + * Ensures that general CPU context is saved, notably FPU and DSP. + */ +static int mips_cpu_save(void) +{ + /* Save FPU state */ + lose_fpu(1); + + /* Save DSP state */ + save_dsp(current); + + return 0; +} + +/** + * mips_cpu_restore() - Restore general CPU state. + * Restores important CPU context. + */ +static void mips_cpu_restore(void) +{ + unsigned int cpu = smp_processor_id(); + + /* Restore ASID */ + if (current->mm) + write_c0_entryhi(cpu_asid(cpu, current->mm)); + + /* Restore DSP state */ + restore_dsp(current); + + /* Restore UserLocal */ + if (cpu_has_userlocal) + write_c0_userlocal(current_thread_info()->tp_value); + + /* Restore watch registers */ + __restore_watch(); +} + +/** + * mips_pm_notifier() - Notifier for preserving general CPU context. + * @self: Notifier block. + * @cmd: CPU PM event. + * @v: Private data (unused). + * + * This is called when a CPU power management event occurs, and is used to + * ensure that important CPU context is preserved across a CPU power down. + */ +static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + int ret; + + switch (cmd) { + case CPU_PM_ENTER: + ret = mips_cpu_save(); + if (ret) + return NOTIFY_STOP; + break; + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + mips_cpu_restore(); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block mips_pm_notifier_block = { + .notifier_call = mips_pm_notifier, +}; + +static int __init mips_pm_init(void) +{ + return cpu_pm_register_notifier(&mips_pm_notifier_block); +} +arch_initcall(mips_pm_init); diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 8c58d8a84bf3..037a44d962f3 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -17,8 +17,24 @@ unsigned int vced_count, vcei_count; +/* + * * No lock; only written during early bootup by CPU 0. 
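The notifier above only runs if something brackets the platform's low-power entry with the CPU PM calls from linux/cpu_pm.h; a low-power driver typically wraps calls like cps_pm_enter_state() this way. A minimal caller, with the platform-specific power-down step elided:

#include <linux/cpu_pm.h>

static int enter_deep_idle_sketch(void)
{
	int err;

	err = cpu_pm_enter();	/* CPU_PM_ENTER: mips_cpu_save() runs */
	if (err)
		return err;	/* a notifier vetoed the transition */

	/* ... power-gate the CPU here ... */

	cpu_pm_exit();		/* CPU_PM_EXIT: mips_cpu_restore() runs */
	return 0;
}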
+ * */ +static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain); + +int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb) +{ + return raw_notifier_chain_register(&proc_cpuinfo_chain, nb); +} + +int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v) +{ + return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v); +} + static int show_cpuinfo(struct seq_file *m, void *v) { + struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args; unsigned long n = (unsigned long) v - 1; unsigned int version = cpu_data[n].processor_id; unsigned int fp_vers = cpu_data[n].fpu_id; @@ -65,26 +81,25 @@ static int show_cpuinfo(struct seq_file *m, void *v) cpu_data[n].watch_reg_masks[i]); seq_printf(m, "]\n"); } - if (cpu_has_mips_r) { - seq_printf(m, "isa\t\t\t: mips1"); - if (cpu_has_mips_2) - seq_printf(m, "%s", " mips2"); - if (cpu_has_mips_3) - seq_printf(m, "%s", " mips3"); - if (cpu_has_mips_4) - seq_printf(m, "%s", " mips4"); - if (cpu_has_mips_5) - seq_printf(m, "%s", " mips5"); - if (cpu_has_mips32r1) - seq_printf(m, "%s", " mips32r1"); - if (cpu_has_mips32r2) - seq_printf(m, "%s", " mips32r2"); - if (cpu_has_mips64r1) - seq_printf(m, "%s", " mips64r1"); - if (cpu_has_mips64r2) - seq_printf(m, "%s", " mips64r2"); - seq_printf(m, "\n"); - } + + seq_printf(m, "isa\t\t\t: mips1"); + if (cpu_has_mips_2) + seq_printf(m, "%s", " mips2"); + if (cpu_has_mips_3) + seq_printf(m, "%s", " mips3"); + if (cpu_has_mips_4) + seq_printf(m, "%s", " mips4"); + if (cpu_has_mips_5) + seq_printf(m, "%s", " mips5"); + if (cpu_has_mips32r1) + seq_printf(m, "%s", " mips32r1"); + if (cpu_has_mips32r2) + seq_printf(m, "%s", " mips32r2"); + if (cpu_has_mips64r1) + seq_printf(m, "%s", " mips64r1"); + if (cpu_has_mips64r2) + seq_printf(m, "%s", " mips64r2"); + seq_printf(m, "\n"); seq_printf(m, "ASEs implemented\t:"); if (cpu_has_mips16) seq_printf(m, "%s", " mips16"); @@ -96,6 +111,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mipsmt) seq_printf(m, "%s", " mt"); if (cpu_has_mmips) seq_printf(m, "%s", " micromips"); if (cpu_has_vz) seq_printf(m, "%s", " vz"); + if (cpu_has_msa) seq_printf(m, "%s", " msa"); + if (cpu_has_eva) seq_printf(m, "%s", " eva"); seq_printf(m, "\n"); if (cpu_has_mmips) { @@ -112,6 +129,13 @@ static int show_cpuinfo(struct seq_file *m, void *v) cpu_has_vce ? "%u" : "not available"); seq_printf(m, fmt, 'D', vced_count); seq_printf(m, fmt, 'I', vcei_count); + + proc_cpuinfo_notifier_args.m = m; + proc_cpuinfo_notifier_args.n = n; + + raw_notifier_call_chain(&proc_cpuinfo_chain, 0, + &proc_cpuinfo_notifier_args); + seq_printf(m, "\n"); return 0; diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ddc76103e78c..0a1ec0f3beff 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -32,6 +32,7 @@ #include <asm/cpu.h> #include <asm/dsp.h> #include <asm/fpu.h> +#include <asm/msa.h> #include <asm/pgtable.h> #include <asm/mipsregs.h> #include <asm/processor.h> @@ -60,15 +61,13 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) /* New thread loses kernel privileges. */ status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); -#ifdef CONFIG_64BIT - status |= test_thread_flag(TIF_32BIT_REGS) ? 
0 : ST0_FR; -#endif status |= KU_USER; regs->cp0_status = status; clear_used_math(); clear_fpu_owner(); - if (cpu_has_dsp) - __init_dsp(); + init_dsp(); + clear_thread_flag(TIF_MSA_CTX_LIVE); + disable_msa(); regs->cp0_epc = pc; regs->regs[29] = sp; } @@ -93,7 +92,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, preempt_disable(); - if (is_fpu_owner()) + if (is_msa_enabled()) + save_msa(p); + else if (is_fpu_owner()) save_fp(p); if (cpu_has_dsp) @@ -139,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, */ childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC restores TCStatus after Status, and the CU bits - * are aliased there. - */ - childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); -#endif clear_tsk_thread_flag(p, TIF_USEDFPU); #ifdef CONFIG_MIPS_MT_FPAFF @@ -161,7 +155,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, /* Fill in the fpu structure for a core dump.. */ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) { - memcpy(r, ¤t->thread.fpu, sizeof(current->thread.fpu)); + int i; + + for (i = 0; i < NUM_FPU_REGS; i++) + memcpy(&r[i], ¤t->thread.fpu.fpr[i], sizeof(*r)); + + memcpy(&r[NUM_FPU_REGS], ¤t->thread.fpu.fcr31, + sizeof(current->thread.fpu.fcr31)); return 1; } @@ -196,7 +196,13 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) { - memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu)); + int i; + + for (i = 0; i < NUM_FPU_REGS; i++) + memcpy(&fpr[i], &t->thread.fpu.fpr[i], sizeof(*fpr)); + + memcpy(&fpr[NUM_FPU_REGS], &t->thread.fpu.fcr31, + sizeof(t->thread.fpu.fcr31)); return 1; } diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 3c3b0df8f48d..5d39bb85bf35 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -47,7 +47,7 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); } -void __init __dt_setup_arch(struct boot_param_header *bph) +void __init __dt_setup_arch(void *bph) { if (!early_init_dt_scan(bph)) return; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index b52e1d2b33e0..f639ccd5060c 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -114,51 +114,30 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data) int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) { int i; - unsigned int tmp; if (!access_ok(VERIFY_WRITE, data, 33 * 8)) return -EIO; if (tsk_used_math(child)) { - fpureg_t *fregs = get_fpu_regs(child); + union fpureg *fregs = get_fpu_regs(child); for (i = 0; i < 32; i++) - __put_user(fregs[i], i + (__u64 __user *) data); + __put_user(get_fpr64(&fregs[i], 0), + i + (__u64 __user *)data); } else { for (i = 0; i < 32; i++) __put_user((__u64) -1, i + (__u64 __user *) data); } __put_user(child->thread.fpu.fcr31, data + 64); - - preempt_disable(); - if (cpu_has_fpu) { - unsigned int flags; - - if (cpu_has_mipsmt) { - unsigned int vpflags = dvpe(); - flags = read_c0_status(); - __enable_fpu(); - __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp)); - write_c0_status(flags); - evpe(vpflags); - } else { - flags = read_c0_status(); - __enable_fpu(); - __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp)); - write_c0_status(flags); - } - } else { - tmp = 0; - } - preempt_enable(); - __put_user(tmp, data + 65); + __put_user(current_cpu_data.fpu_id, data + 65); return 0; } int ptrace_setfpregs(struct task_struct *child, __u32 
__user *data) { - fpureg_t *fregs; + union fpureg *fregs; + u64 fpr_val; int i; if (!access_ok(VERIFY_READ, data, 33 * 8)) @@ -166,8 +145,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) fregs = get_fpu_regs(child); - for (i = 0; i < 32; i++) - __get_user(fregs[i], i + (__u64 __user *) data); + for (i = 0; i < 32; i++) { + __get_user(fpr_val, i + (__u64 __user *)data); + set_fpr64(&fregs[i], 0, fpr_val); + } __get_user(child->thread.fpu.fcr31, data + 64); @@ -182,7 +163,7 @@ int ptrace_get_watch_regs(struct task_struct *child, enum pt_watch_style style; int i; - if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) + if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) return -EIO; @@ -196,14 +177,14 @@ int ptrace_get_watch_regs(struct task_struct *child, #endif __put_user(style, &addr->style); - __put_user(current_cpu_data.watch_reg_use_cnt, + __put_user(boot_cpu_data.watch_reg_use_cnt, &addr->WATCH_STYLE.num_valid); - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { __put_user(child->thread.watch.mips3264.watchlo[i], &addr->WATCH_STYLE.watchlo[i]); __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff, &addr->WATCH_STYLE.watchhi[i]); - __put_user(current_cpu_data.watch_reg_masks[i], + __put_user(boot_cpu_data.watch_reg_masks[i], &addr->WATCH_STYLE.watch_masks[i]); } for (; i < 8; i++) { @@ -223,12 +204,12 @@ int ptrace_set_watch_regs(struct task_struct *child, unsigned long lt[NUM_WATCH_REGS]; u16 ht[NUM_WATCH_REGS]; - if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) + if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) return -EIO; /* Check the values. */ - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); #ifdef CONFIG_32BIT if (lt[i] & __UA_LIMIT) @@ -247,7 +228,7 @@ int ptrace_set_watch_regs(struct task_struct *child, return -EINVAL; } /* Install them. 
*/ - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { if (lt[i] & 7) watch_active = 1; child->thread.watch.mips3264.watchlo[i] = lt[i]; @@ -300,10 +281,27 @@ static int fpr_get(struct task_struct *target, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); + unsigned i; + int err; + u64 fpr_val; + /* XXX fcr31 */ + + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu, + 0, sizeof(elf_fpregset_t)); + + for (i = 0; i < NUM_FPU_REGS; i++) { + fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0); + err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &fpr_val, i * sizeof(elf_fpreg_t), + (i + 1) * sizeof(elf_fpreg_t)); + if (err) + return err; + } + + return 0; } static int fpr_set(struct task_struct *target, @@ -311,10 +309,27 @@ static int fpr_set(struct task_struct *target, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.fpu, - 0, sizeof(elf_fpregset_t)); + unsigned i; + int err; + u64 fpr_val; + /* XXX fcr31 */ + + if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpu, + 0, sizeof(elf_fpregset_t)); + + for (i = 0; i < NUM_FPU_REGS; i++) { + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &fpr_val, i * sizeof(elf_fpreg_t), + (i + 1) * sizeof(elf_fpreg_t)); + if (err) + return err; + set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); + } + + return 0; } enum mips_regset { @@ -408,6 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, /* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; + union fpureg *fregs; unsigned long tmp = 0; regs = task_pt_regs(child); @@ -418,26 +434,26 @@ long arch_ptrace(struct task_struct *child, long request, tmp = regs->regs[addr]; break; case FPR_BASE ... FPR_BASE + 31: - if (tsk_used_math(child)) { - fpureg_t *fregs = get_fpu_regs(child); + if (!tsk_used_math(child)) { + /* FP not yet used */ + tmp = -1; + break; + } + fregs = get_fpu_regs(child); #ifdef CONFIG_32BIT + if (test_thread_flag(TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even * registers - unless we're using r2k_switch.S.
*/ - if (addr & 1) - tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32); - else - tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff); -#endif -#ifdef CONFIG_64BIT - tmp = fregs[addr - FPR_BASE]; -#endif - } else { - tmp = -1; /* FP not yet used */ + tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE], + addr & 1); + break; } +#endif + tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; @@ -462,44 +478,10 @@ long arch_ptrace(struct task_struct *child, long request, case FPC_CSR: tmp = child->thread.fpu.fcr31; break; - case FPC_EIR: { /* implementation / version register */ - unsigned int flags; -#ifdef CONFIG_MIPS_MT_SMTC - unsigned long irqflags; - unsigned int mtflags; -#endif /* CONFIG_MIPS_MT_SMTC */ - - preempt_disable(); - if (!cpu_has_fpu) { - preempt_enable(); - break; - } - -#ifdef CONFIG_MIPS_MT_SMTC - /* Read-modify-write of Status must be atomic */ - local_irq_save(irqflags); - mtflags = dmt(); -#endif /* CONFIG_MIPS_MT_SMTC */ - if (cpu_has_mipsmt) { - unsigned int vpflags = dvpe(); - flags = read_c0_status(); - __enable_fpu(); - __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); - write_c0_status(flags); - evpe(vpflags); - } else { - flags = read_c0_status(); - __enable_fpu(); - __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); - write_c0_status(flags); - } -#ifdef CONFIG_MIPS_MT_SMTC - emt(mtflags); - local_irq_restore(irqflags); -#endif /* CONFIG_MIPS_MT_SMTC */ - preempt_enable(); + case FPC_EIR: + /* implementation / version register */ + tmp = current_cpu_data.fpu_id; break; - } case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; @@ -545,7 +527,7 @@ long arch_ptrace(struct task_struct *child, long request, regs->regs[addr] = data; break; case FPR_BASE ... FPR_BASE + 31: { - fpureg_t *fregs = get_fpu_regs(child); + union fpureg *fregs = get_fpu_regs(child); if (!tsk_used_math(child)) { /* FP not yet used */ @@ -554,22 +536,18 @@ long arch_ptrace(struct task_struct *child, long request, child->thread.fpu.fcr31 = 0; } #ifdef CONFIG_32BIT - /* - * The odd registers are actually the high order bits - * of the values stored in the even registers - unless - * we're using r2k_switch.S. - */ - if (addr & 1) { - fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff; - fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32; - } else { - fregs[addr - FPR_BASE] &= ~0xffffffffLL; - fregs[addr - FPR_BASE] |= data; + if (test_thread_flag(TIF_32BIT_FPREGS)) { + /* + * The odd registers are actually the high + * order bits of the values stored in the even + * registers - unless we're using r2k_switch.S. 
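A worked example of the TIF_32BIT_FPREGS index arithmetic above: with Status.FR=0 only the 16 even-numbered 64-bit registers exist, and an odd-numbered $fN is served from the high 32-bit half of $f(N-1). Reusing the simplified union from the earlier sketch (illustrative, little-endian only):

#include <stdint.h>
#include <stdio.h>

union fpureg {				// same simplified model as above
	uint32_t val32[2];
	uint64_t val64[1];
};

#define FPR_BASE 32			// ptrace register numbering

static uint32_t get_fpr32(union fpureg *fpr, unsigned int idx)
{
	return fpr->val32[idx];
}

int main(void)
{
	union fpureg fregs[32] = { 0 };
	unsigned int addr;

	fregs[6].val64[0] = 0xdeadbeef01234567ULL;

	for (addr = FPR_BASE + 6; addr <= FPR_BASE + 7; addr++) {
		// Odd registers read the high word of the even register
		// below them; even registers read their own low word.
		uint32_t tmp = get_fpr32(&fregs[(addr & ~1u) - FPR_BASE],
					 addr & 1);
		printf("$f%u -> %08x\n", addr - FPR_BASE, tmp);
	}
	return 0;			// $f6 -> 01234567, $f7 -> deadbeef
}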
+ */ + set_fpr32(&fregs[(addr & ~1) - FPR_BASE], + addr & 1, data); + break; } #endif -#ifdef CONFIG_64BIT - fregs[addr - FPR_BASE] = data; -#endif + set_fpr64(&fregs[addr - FPR_BASE], 0, data); break; } case PC: @@ -656,13 +634,13 @@ long arch_ptrace(struct task_struct *child, long request, * Notification of system call entry/exit * - triggered by current->work.syscall_trace */ -asmlinkage void syscall_trace_enter(struct pt_regs *regs) +asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) { long ret = 0; user_exit(); - /* do the secure computing check first */ - secure_computing_strict(regs->regs[2]); + if (secure_computing(syscall) == -1) + return -1; if (test_thread_flag(TIF_SYSCALL_TRACE) && tracehook_report_syscall_entry(regs)) @@ -671,10 +649,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->regs[2]); - audit_syscall_entry(__syscall_get_arch(), - regs->regs[2], + audit_syscall_entry(syscall_get_arch(), + syscall, regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); + return syscall; } /* diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 9486055ba660..b40c3ca60ee5 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -80,6 +80,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, /* Read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { struct pt_regs *regs; + union fpureg *fregs; unsigned int tmp; regs = task_pt_regs(child); @@ -90,21 +91,23 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, tmp = regs->regs[addr]; break; case FPR_BASE ... FPR_BASE + 31: - if (tsk_used_math(child)) { - fpureg_t *fregs = get_fpu_regs(child); - + if (!tsk_used_math(child)) { + /* FP not yet used */ + tmp = -1; + break; + } + fregs = get_fpu_regs(child); + if (test_thread_flag(TIF_32BIT_FPREGS)) { /* * The odd registers are actually the high * order bits of the values stored in the even * registers - unless we're using r2k_switch.S. 
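Back to the syscall_trace_enter() change above: the function now returns the syscall number to dispatch, or -1 when seccomp rejects the call, and the reworked scall*.S stubs later in this patch branch past the syscall whenever v0 comes back negative. A userspace stand-in for that contract, with the kernel function stubbed out:

#include <stdio.h>

// Stub standing in for the kernel's syscall_trace_enter(); returns the
// syscall number to dispatch, or -1 when seccomp denies the call.
// Treating number 999 as denied is an arbitrary choice for the demo.
static long syscall_trace_enter(long syscall)
{
	return syscall == 999 ? -1 : syscall;
}

int main(void)
{
	long nr;

	for (nr = 998; nr <= 999; nr++) {
		long ret = syscall_trace_enter(nr);

		if (ret < 0)		// bltz v0, 2f in the asm stubs
			printf("%ld: skipped by seccomp\n", nr);
		else
			printf("%ld: dispatched\n", ret);
	}
	return 0;
}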
*/ - if (addr & 1) - tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32); - else - tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff); - } else { - tmp = -1; /* FP not yet used */ + tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE], + addr & 1); + break; } + tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); break; case PC: tmp = regs->cp0_epc; @@ -124,46 +127,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, case FPC_CSR: tmp = child->thread.fpu.fcr31; break; - case FPC_EIR: { /* implementation / version register */ - unsigned int flags; -#ifdef CONFIG_MIPS_MT_SMTC - unsigned int irqflags; - unsigned int mtflags; -#endif /* CONFIG_MIPS_MT_SMTC */ - - preempt_disable(); - if (!cpu_has_fpu) { - preempt_enable(); - tmp = 0; - break; - } - -#ifdef CONFIG_MIPS_MT_SMTC - /* Read-modify-write of Status must be atomic */ - local_irq_save(irqflags); - mtflags = dmt(); -#endif /* CONFIG_MIPS_MT_SMTC */ - - if (cpu_has_mipsmt) { - unsigned int vpflags = dvpe(); - flags = read_c0_status(); - __enable_fpu(); - __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); - write_c0_status(flags); - evpe(vpflags); - } else { - flags = read_c0_status(); - __enable_fpu(); - __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp)); - write_c0_status(flags); - } -#ifdef CONFIG_MIPS_MT_SMTC - emt(mtflags); - local_irq_restore(irqflags); -#endif /* CONFIG_MIPS_MT_SMTC */ - preempt_enable(); + case FPC_EIR: + /* implementation / version register */ + tmp = current_cpu_data.fpu_id; break; - } case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; @@ -228,7 +195,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, regs->regs[addr] = data; break; case FPR_BASE ... FPR_BASE + 31: { - fpureg_t *fregs = get_fpu_regs(child); + union fpureg *fregs = get_fpu_regs(child); if (!tsk_used_math(child)) { /* FP not yet used */ @@ -236,20 +203,17 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, sizeof(child->thread.fpu)); child->thread.fpu.fcr31 = 0; } - /* - * The odd registers are actually the high order bits - * of the values stored in the even registers - unless - * we're using r2k_switch.S. - */ - if (addr & 1) { - fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff; - fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32; - } else { - fregs[addr - FPR_BASE] &= ~0xffffffffLL; - /* Must cast, lest sign extension fill upper - bits! */ - fregs[addr - FPR_BASE] |= (unsigned int)data; + if (test_thread_flag(TIF_32BIT_FPREGS)) { + /* + * The odd registers are actually the high + * order bits of the values stored in the even + * registers - unless we're using r2k_switch.S. 
+ */ + set_fpr32(&fregs[(addr & ~1) - FPR_BASE], + addr & 1, data); + break; } + set_fpr64(&fregs[addr - FPR_BASE], 0, data); break; } case PC: diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 55ffe149dae9..8352523568e6 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -30,12 +30,20 @@ .endm .set noreorder - .set mips3 + .set arch=r4000 LEAF(_save_fp_context) cfc1 t1, fcr31 -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) + .set push +#ifdef CONFIG_CPU_MIPS32_R2 + .set mips64r2 + mfc0 t0, CP0_STATUS + sll t0, t0, 5 + bgez t0, 1f # skip storing odd if FR=0 + nop +#endif /* Store the 16 odd double precision registers */ EX sdc1 $f1, SC_FPREGS+8(a0) EX sdc1 $f3, SC_FPREGS+24(a0) @@ -53,6 +61,7 @@ LEAF(_save_fp_context) EX sdc1 $f27, SC_FPREGS+216(a0) EX sdc1 $f29, SC_FPREGS+232(a0) EX sdc1 $f31, SC_FPREGS+248(a0) +1: .set pop #endif /* Store the 16 even double precision registers */ @@ -82,7 +91,31 @@ LEAF(_save_fp_context) LEAF(_save_fp_context32) cfc1 t1, fcr31 - EX sdc1 $f0, SC32_FPREGS+0(a0) + mfc0 t0, CP0_STATUS + sll t0, t0, 5 + bgez t0, 1f # skip storing odd if FR=0 + nop + + /* Store the 16 odd double precision registers */ + EX sdc1 $f1, SC32_FPREGS+8(a0) + EX sdc1 $f3, SC32_FPREGS+24(a0) + EX sdc1 $f5, SC32_FPREGS+40(a0) + EX sdc1 $f7, SC32_FPREGS+56(a0) + EX sdc1 $f9, SC32_FPREGS+72(a0) + EX sdc1 $f11, SC32_FPREGS+88(a0) + EX sdc1 $f13, SC32_FPREGS+104(a0) + EX sdc1 $f15, SC32_FPREGS+120(a0) + EX sdc1 $f17, SC32_FPREGS+136(a0) + EX sdc1 $f19, SC32_FPREGS+152(a0) + EX sdc1 $f21, SC32_FPREGS+168(a0) + EX sdc1 $f23, SC32_FPREGS+184(a0) + EX sdc1 $f25, SC32_FPREGS+200(a0) + EX sdc1 $f27, SC32_FPREGS+216(a0) + EX sdc1 $f29, SC32_FPREGS+232(a0) + EX sdc1 $f31, SC32_FPREGS+248(a0) + + /* Store the 16 even double precision registers */ +1: EX sdc1 $f0, SC32_FPREGS+0(a0) EX sdc1 $f2, SC32_FPREGS+16(a0) EX sdc1 $f4, SC32_FPREGS+32(a0) EX sdc1 $f6, SC32_FPREGS+48(a0) @@ -113,8 +146,17 @@ LEAF(_save_fp_context32) * - cp1 status/control register */ LEAF(_restore_fp_context) - EX lw t0, SC_FPC_CSR(a0) -#ifdef CONFIG_64BIT + EX lw t1, SC_FPC_CSR(a0) + +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) + .set push +#ifdef CONFIG_CPU_MIPS32_R2 + .set mips64r2 + mfc0 t0, CP0_STATUS + sll t0, t0, 5 + bgez t0, 1f # skip loading odd if FR=0 + nop +#endif EX ldc1 $f1, SC_FPREGS+8(a0) EX ldc1 $f3, SC_FPREGS+24(a0) EX ldc1 $f5, SC_FPREGS+40(a0) @@ -131,6 +173,7 @@ LEAF(_restore_fp_context) EX ldc1 $f27, SC_FPREGS+216(a0) EX ldc1 $f29, SC_FPREGS+232(a0) EX ldc1 $f31, SC_FPREGS+248(a0) +1: .set pop #endif EX ldc1 $f0, SC_FPREGS+0(a0) EX ldc1 $f2, SC_FPREGS+16(a0) @@ -148,7 +191,7 @@ LEAF(_restore_fp_context) EX ldc1 $f26, SC_FPREGS+208(a0) EX ldc1 $f28, SC_FPREGS+224(a0) EX ldc1 $f30, SC_FPREGS+240(a0) - ctc1 t0, fcr31 + ctc1 t1, fcr31 jr ra li v0, 0 # success END(_restore_fp_context) @@ -156,8 +199,31 @@ LEAF(_restore_fp_context) #ifdef CONFIG_MIPS32_COMPAT LEAF(_restore_fp_context32) /* Restore an o32 sigcontext. 
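The recurring `mfc0 t0, CP0_STATUS; sll t0, t0, 5; bgez t0, 1f` sequence above tests Status.FR without loading a mask constant: FR is bit 26, shifting left by 5 parks it in the sign bit, and bgez (branch when the sign bit is clear) then skips the odd-numbered register stores whenever FR=0. The same test written in C:

#include <stdint.h>
#include <stdio.h>

#define ST0_FR (1u << 26)	// Status.FR: full 32 x 64b FP register file

static void save_fp_context_like(uint32_t status)
{
	// sll t0, t0, 5 ; bgez t0, 1f  ==  skip odd regs when FR is clear
	if ((int32_t)(status << 5) >= 0)
		printf("FR=0: store the 16 even-numbered doubles only\n");
	else
		printf("FR=1: store all 32 doubles\n");
}

int main(void)
{
	save_fp_context_like(0);	// FR clear
	save_fp_context_like(ST0_FR);	// FR set
	return 0;
}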
*/ - EX lw t0, SC32_FPC_CSR(a0) - EX ldc1 $f0, SC32_FPREGS+0(a0) + EX lw t1, SC32_FPC_CSR(a0) + + mfc0 t0, CP0_STATUS + sll t0, t0, 5 + bgez t0, 1f # skip loading odd if FR=0 + nop + + EX ldc1 $f1, SC32_FPREGS+8(a0) + EX ldc1 $f3, SC32_FPREGS+24(a0) + EX ldc1 $f5, SC32_FPREGS+40(a0) + EX ldc1 $f7, SC32_FPREGS+56(a0) + EX ldc1 $f9, SC32_FPREGS+72(a0) + EX ldc1 $f11, SC32_FPREGS+88(a0) + EX ldc1 $f13, SC32_FPREGS+104(a0) + EX ldc1 $f15, SC32_FPREGS+120(a0) + EX ldc1 $f17, SC32_FPREGS+136(a0) + EX ldc1 $f19, SC32_FPREGS+152(a0) + EX ldc1 $f21, SC32_FPREGS+168(a0) + EX ldc1 $f23, SC32_FPREGS+184(a0) + EX ldc1 $f25, SC32_FPREGS+200(a0) + EX ldc1 $f27, SC32_FPREGS+216(a0) + EX ldc1 $f29, SC32_FPREGS+232(a0) + EX ldc1 $f31, SC32_FPREGS+248(a0) + +1: EX ldc1 $f0, SC32_FPREGS+0(a0) EX ldc1 $f2, SC32_FPREGS+16(a0) EX ldc1 $f4, SC32_FPREGS+32(a0) EX ldc1 $f6, SC32_FPREGS+48(a0) @@ -173,7 +239,7 @@ LEAF(_restore_fp_context32) EX ldc1 $f26, SC32_FPREGS+208(a0) EX ldc1 $f28, SC32_FPREGS+224(a0) EX ldc1 $f30, SC32_FPREGS+240(a0) - ctc1 t0, fcr31 + ctc1 t1, fcr31 jr ra li v0, 0 # success END(_restore_fp_context32) diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 078de5eaca8f..81ca3f70fe29 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -28,19 +28,10 @@ */ #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) -/* - * FPU context is saved iff the process has used it's FPU in the current - * time slice as indicated by _TIF_USEDFPU. In any case, the CU1 bit for user - * space STATUS register should be 0, so that a process *always* starts its - * userland with FPU disabled after each context switch. - * - * FPU will be enabled as soon as the process accesses FPU again, through - * do_cpu() trap. - */ - +#ifndef USE_ALTERNATE_RESUME_IMPL /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti, s32 fp_save) */ .align 5 LEAF(resume) @@ -50,23 +41,37 @@ LONG_S ra, THREAD_REG31(a0) /* - * check if we need to save FPU registers + * Check whether we need to save any FP context. FP context is saved + * iff the process has used the context with the scalar FPU or the MSA + * ASE in the current time slice, as indicated by _TIF_USEDFPU and + * _TIF_USEDMSA respectively. switch_to will have set fp_save + * accordingly to an FP_SAVE_ enum value. */ + beqz a3, 2f - beqz a3, 1f - - PTR_L t3, TASK_THREAD_INFO(a0) /* - * clear saved user stack CU1 bit + * We do. Clear the saved CU1 bit for prev, such that next time it is + * scheduled it will start in userland with the FPU disabled. If the + * task uses the FPU then it will be enabled again via the do_cpu trap. + * This allows us to lazily restore the FP context. */ + PTR_L t3, TASK_THREAD_INFO(a0) LONG_L t0, ST_OFF(t3) li t1, ~ST0_CU1 and t0, t0, t1 LONG_S t0, ST_OFF(t3) + /* Check whether we're saving scalar or vector context. */ + bgtz a3, 1f + + /* Save 128b MSA vector context. */ + msa_save_all a0 + b 2f + +1: /* Save 32b/64b scalar FP context. 
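The a3 argument tested at the top of resume() is the fp_save value chosen by switch_to. From the beqz/bgtz pair above: zero means nothing to save, a positive value selects scalar FP context and a negative value selects MSA vector context. The sketch below only mirrors that branch structure; the FP_SAVE_* values are stand-ins for whatever switch_to.h actually defines.

#include <stdio.h>

// Assumed values, matching the sign tests in resume(); not quoted
// from asm/switch_to.h.
#define FP_SAVE_NONE	0
#define FP_SAVE_SCALAR	1
#define FP_SAVE_VECTOR	-1

static void save_context(int fp_save)
{
	if (fp_save == 0) {			// beqz a3, 2f
		printf("nothing to save\n");
		return;
	}
	if (fp_save > 0)			// bgtz a3, 1f
		printf("save scalar FP context\n");
	else					// fall through
		printf("save 128b MSA vector context\n");
}

int main(void)
{
	save_context(FP_SAVE_NONE);
	save_context(FP_SAVE_SCALAR);
	save_context(FP_SAVE_VECTOR);
	return 0;
}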
*/ fpu_save_double a0 t0 t1 # c0_status passed in t0 # clobbers t1 -1: +2: #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard @@ -83,18 +88,6 @@ PTR_ADDU t0, $28, _THREAD_SIZE - 32 set_saved_sp t0, t1, t2 -#ifdef CONFIG_MIPS_MT_SMTC - /* Read-modify-writes of Status must be atomic on a VPE */ - mfc0 t2, CP0_TCSTATUS - ori t1, t2, TCSTATUS_IXMT - mtc0 t1, CP0_TCSTATUS - andi t2, t2, TCSTATUS_IXMT - _ehb - DMT 8 # dmt t0 - move t1,ra - jal mips_ihb - move ra,t1 -#endif /* CONFIG_MIPS_MT_SMTC */ mfc0 t1, CP0_STATUS /* Do we really need this? */ li a3, 0xff01 and t1, a3 @@ -103,27 +96,17 @@ and a2, a3 or a2, t1 mtc0 a2, CP0_STATUS -#ifdef CONFIG_MIPS_MT_SMTC - _ehb - andi t0, t0, VPECONTROL_TE - beqz t0, 1f - emt -1: - mfc0 t1, CP0_TCSTATUS - xori t1, t1, TCSTATUS_IXMT - or t1, t1, t2 - mtc0 t1, CP0_TCSTATUS - _ehb -#endif /* CONFIG_MIPS_MT_SMTC */ move v0, a0 jr ra END(resume) +#endif /* USE_ALTERNATE_RESUME_IMPL */ + /* * Save a thread's fp context. */ LEAF(_save_fp) -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 @@ -134,13 +117,33 @@ LEAF(_save_fp) * Restore a thread's fp context. */ LEAF(_restore_fp) -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 jr ra END(_restore_fp) +#ifdef CONFIG_CPU_HAS_MSA + +/* + * Save a thread's MSA vector context. + */ +LEAF(_save_msa) + msa_save_all a0 + jr ra + END(_save_msa) + +/* + * Restore a thread's MSA vector context. + */ +LEAF(_restore_msa) + msa_restore_all a0 + jr ra + END(_restore_msa) + +#endif + /* * Load the FPU with signalling NANS. This bit pattern we're using has * the property that no matter whether considered as single or as double @@ -152,19 +155,10 @@ LEAF(_restore_fp) #define FPU_DEFAULT 0x00000000 LEAF(_init_fpu) -#ifdef CONFIG_MIPS_MT_SMTC - /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */ - mfc0 t0, CP0_TCSTATUS - /* Bit position is the same for Status, TCStatus */ - li t1, ST0_CU1 - or t0, t1 - mtc0 t0, CP0_TCSTATUS -#else /* Normal MIPS CU1 enable */ mfc0 t0, CP0_STATUS li t1, ST0_CU1 or t0, t1 mtc0 t0, CP0_STATUS -#endif /* CONFIG_MIPS_MT_SMTC */ enable_fpu_hazard li t1, FPU_DEFAULT @@ -228,8 +222,49 @@ LEAF(_init_fpu) mtc1 t1, $f29 mtc1 t1, $f30 mtc1 t1, $f31 + +#ifdef CONFIG_CPU_MIPS32_R2 + .set push + .set mips64r2 + sll t0, t0, 5 # is Status.FR set? + bgez t0, 1f # no: skip setting upper 32b + + mthc1 t1, $f0 + mthc1 t1, $f1 + mthc1 t1, $f2 + mthc1 t1, $f3 + mthc1 t1, $f4 + mthc1 t1, $f5 + mthc1 t1, $f6 + mthc1 t1, $f7 + mthc1 t1, $f8 + mthc1 t1, $f9 + mthc1 t1, $f10 + mthc1 t1, $f11 + mthc1 t1, $f12 + mthc1 t1, $f13 + mthc1 t1, $f14 + mthc1 t1, $f15 + mthc1 t1, $f16 + mthc1 t1, $f17 + mthc1 t1, $f18 + mthc1 t1, $f19 + mthc1 t1, $f20 + mthc1 t1, $f21 + mthc1 t1, $f22 + mthc1 t1, $f23 + mthc1 t1, $f24 + mthc1 t1, $f25 + mthc1 t1, $f26 + mthc1 t1, $f27 + mthc1 t1, $f28 + mthc1 t1, $f29 + mthc1 t1, $f30 + mthc1 t1, $f31 +1: .set pop +#endif /* CONFIG_CPU_MIPS32_R2 */ #else - .set mips3 + .set arch=r4000 dmtc1 t1, $f0 dmtc1 t1, $f2 dmtc1 t1, $f4 diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c new file mode 100644 index 000000000000..758fb3cd2326 --- /dev/null +++ b/arch/mips/kernel/rtlx-cmp.c @@ -0,0 +1,119 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd. + */ +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/smp.h> + +#include <asm/mips_mt.h> +#include <asm/vpe.h> +#include <asm/rtlx.h> + +static int major; + +static void rtlx_interrupt(void) +{ + int i; + struct rtlx_info *info; + struct rtlx_info **p = vpe_get_shared(aprp_cpu_index()); + + if (p == NULL || *p == NULL) + return; + + info = *p; + + if (info->ap_int_pending == 1 && smp_processor_id() == 0) { + for (i = 0; i < RTLX_CHANNELS; i++) { + wake_up(&channel_wqs[i].lx_queue); + wake_up(&channel_wqs[i].rt_queue); + } + info->ap_int_pending = 0; + } +} + +void _interrupt_sp(void) +{ + smp_send_reschedule(aprp_cpu_index()); +} + +int __init rtlx_module_init(void) +{ + struct device *dev; + int i, err; + + if (!cpu_has_mipsmt) { + pr_warn("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } + + if (num_possible_cpus() - aprp_cpu_index() < 1) { + pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n" + "Pass maxcpus=<n> argument as kernel argument\n"); + + return -ENODEV; + } + + major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops); + if (major < 0) { + pr_err("rtlx_module_init: unable to register device\n"); + return major; + } + + /* initialise the wait queues */ + for (i = 0; i < RTLX_CHANNELS; i++) { + init_waitqueue_head(&channel_wqs[i].rt_queue); + init_waitqueue_head(&channel_wqs[i].lx_queue); + atomic_set(&channel_wqs[i].in_open, 0); + mutex_init(&channel_wqs[i].mutex); + + dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, + "%s%d", RTLX_MODULE_NAME, i); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + goto out_chrdev; + } + } + + /* set up notifiers */ + rtlx_notify.start = rtlx_starting; + rtlx_notify.stop = rtlx_stopping; + vpe_notify(aprp_cpu_index(), &rtlx_notify); + + if (cpu_has_vint) { + aprp_hook = rtlx_interrupt; + } else { + pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); + err = -ENODEV; + goto out_class; + } + + return 0; + +out_class: + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); +out_chrdev: + unregister_chrdev(major, RTLX_MODULE_NAME); + + return err; +} + +void __exit rtlx_module_exit(void) +{ + int i; + + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); + + unregister_chrdev(major, RTLX_MODULE_NAME); + + aprp_hook = NULL; +} diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c new file mode 100644 index 000000000000..5a66b975989e --- /dev/null +++ b/arch/mips/kernel/rtlx-mt.c @@ -0,0 +1,150 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd. 
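rtlx_module_init() above follows the usual kernel error-unwind idiom: each failure point jumps to a label that tears down exactly what had been set up so far (out_class destroys the device nodes, out_chrdev releases the major number). A minimal self-contained illustration of that shape, with invented resources:

#include <stdio.h>

// Invented stand-ins; only the unwind structure matters here.
static int register_chrdev_like(void)    { printf("chrdev registered\n"); return 0; }
static void unregister_chrdev_like(void) { printf("chrdev released\n"); }
static int create_devices_like(void)     { printf("devices created\n"); return 0; }
static void destroy_devices_like(void)   { printf("devices destroyed\n"); }
static int hook_irq_like(void)           { return -19; }	// simulate -ENODEV

static int module_init_like(void)
{
	int err;

	err = register_chrdev_like();
	if (err)
		return err;

	err = create_devices_like();
	if (err)
		goto out_chrdev;

	err = hook_irq_like();
	if (err)
		goto out_class;		// unwind in reverse order of setup

	return 0;

out_class:
	destroy_devices_like();
out_chrdev:
	unregister_chrdev_like();
	return err;
}

int main(void)
{
	return module_init_like() ? 1 : 0;
}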
+ */ +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include <asm/mips_mt.h> +#include <asm/vpe.h> +#include <asm/rtlx.h> + +static int major; + +static void rtlx_dispatch(void) +{ + if (read_c0_cause() & read_c0_status() & C_SW0) + do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); +} + +/* + * Interrupt handler may be called before rtlx_init has otherwise had + * a chance to run. + */ +static irqreturn_t rtlx_interrupt(int irq, void *dev_id) +{ + unsigned int vpeflags; + unsigned long flags; + int i; + + local_irq_save(flags); + vpeflags = dvpe(); + set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); + irq_enable_hazard(); + evpe(vpeflags); + local_irq_restore(flags); + + for (i = 0; i < RTLX_CHANNELS; i++) { + wake_up(&channel_wqs[i].lx_queue); + wake_up(&channel_wqs[i].rt_queue); + } + + return IRQ_HANDLED; +} + +static struct irqaction rtlx_irq = { + .handler = rtlx_interrupt, + .name = "RTLX", +}; + +static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; + +void _interrupt_sp(void) +{ + unsigned long flags; + + local_irq_save(flags); + dvpe(); + settc(1); + write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0); + evpe(EVPE_ENABLE); + local_irq_restore(flags); +} + +int __init rtlx_module_init(void) +{ + struct device *dev; + int i, err; + + if (!cpu_has_mipsmt) { + pr_warn("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } + + if (aprp_cpu_index() == 0) { + pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n" + "Pass maxtcs=<n> argument as kernel argument\n"); + + return -ENODEV; + } + + major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops); + if (major < 0) { + pr_err("rtlx_module_init: unable to register device\n"); + return major; + } + + /* initialise the wait queues */ + for (i = 0; i < RTLX_CHANNELS; i++) { + init_waitqueue_head(&channel_wqs[i].rt_queue); + init_waitqueue_head(&channel_wqs[i].lx_queue); + atomic_set(&channel_wqs[i].in_open, 0); + mutex_init(&channel_wqs[i].mutex); + + dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, + "%s%d", RTLX_MODULE_NAME, i); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + goto out_chrdev; + } + } + + /* set up notifiers */ + rtlx_notify.start = rtlx_starting; + rtlx_notify.stop = rtlx_stopping; + vpe_notify(aprp_cpu_index(), &rtlx_notify); + + if (cpu_has_vint) { + aprp_hook = rtlx_dispatch; + } else { + pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); + err = -ENODEV; + goto out_class; + } + + rtlx_irq.dev_id = rtlx; + err = setup_irq(rtlx_irq_num, &rtlx_irq); + if (err) + goto out_class; + + return 0; + +out_class: + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); +out_chrdev: + unregister_chrdev(major, RTLX_MODULE_NAME); + + return err; +} + +void __exit rtlx_module_exit(void) +{ + int i; + + for (i = 0; i < RTLX_CHANNELS; i++) + device_destroy(mt_class, MKDEV(major, i)); + + unregister_chrdev(major, RTLX_MODULE_NAME); + + aprp_hook = NULL; +} diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 2c12ea1668d1..31b1b763cb29 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -1,114 +1,51 @@ /* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. 
* Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * + * Copyright (C) 2013 Imagination Technologies Ltd. */ - -#include <linux/device.h> #include <linux/kernel.h> #include <linux/fs.h> -#include <linux/init.h> -#include <asm/uaccess.h> -#include <linux/list.h> -#include <linux/vmalloc.h> -#include <linux/elf.h> -#include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/moduleloader.h> -#include <linux/interrupt.h> -#include <linux/poll.h> -#include <linux/sched.h> -#include <linux/wait.h> +#include <linux/atomic.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> -#include <asm/cacheflush.h> -#include <linux/atomic.h> -#include <asm/cpu.h> #include <asm/processor.h> -#include <asm/vpe.h> #include <asm/rtlx.h> #include <asm/setup.h> +#include <asm/vpe.h> -static struct rtlx_info *rtlx; -static int major; -static char module_name[] = "rtlx"; - -static struct chan_waitqueues { - wait_queue_head_t rt_queue; - wait_queue_head_t lx_queue; - atomic_t in_open; - struct mutex mutex; -} channel_wqs[RTLX_CHANNELS]; - -static struct vpe_notifications notify; static int sp_stopping; - -extern void *vpe_get_shared(int index); - -static void rtlx_dispatch(void) -{ - do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); -} - - -/* Interrupt handler may be called before rtlx_init has otherwise had - a chance to run. 
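The generic rtlx.c that remains after this split treats each channel as a byte ring buffer, and rtlx_write() further below clamps the user's byte count to the free space between the read and write cursors. The helper here is a guess at write_spacefree()'s definition, inferred from how it is called; one slot is always left unused so that read == write can mean "empty":

#include <stdio.h>

// Assumed shape of rtlx.c's write_spacefree(): bytes writable before
// the write cursor would catch up with the read cursor.
static int write_spacefree(int read, int write, int size)
{
	if (read == write)
		return size - 1;	// empty, keep one slot unused
	return ((read + size - write) % size) - 1;
}

int main(void)
{
	printf("%d\n", write_spacefree(0, 0, 8));	// empty: 7 usable
	printf("%d\n", write_spacefree(3, 2, 8));	// nearly full: 0
	printf("%d\n", write_spacefree(0, 4, 8));	// 3 bytes free
	return 0;
}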
-*/ -static irqreturn_t rtlx_interrupt(int irq, void *dev_id) -{ - unsigned int vpeflags; - unsigned long flags; - int i; - - /* Ought not to be strictly necessary for SMTC builds */ - local_irq_save(flags); - vpeflags = dvpe(); - set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); - irq_enable_hazard(); - evpe(vpeflags); - local_irq_restore(flags); - - for (i = 0; i < RTLX_CHANNELS; i++) { - wake_up(&channel_wqs[i].lx_queue); - wake_up(&channel_wqs[i].rt_queue); - } - - return IRQ_HANDLED; -} +struct rtlx_info *rtlx; +struct chan_waitqueues channel_wqs[RTLX_CHANNELS]; +struct vpe_notifications rtlx_notify; +void (*aprp_hook)(void) = NULL; +EXPORT_SYMBOL(aprp_hook); static void __used dump_rtlx(void) { int i; - printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); + pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state); for (i = 0; i < RTLX_CHANNELS; i++) { struct rtlx_channel *chan = &rtlx->channel[i]; - printk(" rt_state %d lx_state %d buffer_size %d\n", - chan->rt_state, chan->lx_state, chan->buffer_size); + pr_info(" rt_state %d lx_state %d buffer_size %d\n", + chan->rt_state, chan->lx_state, chan->buffer_size); - printk(" rt_read %d rt_write %d\n", - chan->rt_read, chan->rt_write); + pr_info(" rt_read %d rt_write %d\n", + chan->rt_read, chan->rt_write); - printk(" lx_read %d lx_write %d\n", - chan->lx_read, chan->lx_write); + pr_info(" lx_read %d lx_write %d\n", + chan->lx_read, chan->lx_write); - printk(" rt_buffer <%s>\n", chan->rt_buffer); - printk(" lx_buffer <%s>\n", chan->lx_buffer); + pr_info(" rt_buffer <%s>\n", chan->rt_buffer); + pr_info(" lx_buffer <%s>\n", chan->lx_buffer); } } @@ -116,8 +53,7 @@ static void __used dump_rtlx(void) static int rtlx_init(struct rtlx_info *rtlxi) { if (rtlxi->id != RTLX_ID) { - printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", - rtlxi, rtlxi->id); + pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id); return -ENOEXEC; } @@ -127,20 +63,20 @@ static int rtlx_init(struct rtlx_info *rtlxi) } /* notifications */ -static void starting(int vpe) +void rtlx_starting(int vpe) { int i; sp_stopping = 0; /* force a reload of rtlx */ - rtlx=NULL; + rtlx = NULL; /* wake up any sleeping rtlx_open's */ for (i = 0; i < RTLX_CHANNELS; i++) wake_up_interruptible(&channel_wqs[i].lx_queue); } -static void stopping(int vpe) +void rtlx_stopping(int vpe) { int i; @@ -158,31 +94,30 @@ int rtlx_open(int index, int can_sleep) int ret = 0; if (index >= RTLX_CHANNELS) { - printk(KERN_DEBUG "rtlx_open index out of range\n"); + pr_debug("rtlx_open index out of range\n"); return -ENOSYS; } if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { - printk(KERN_DEBUG "rtlx_open channel %d already opened\n", - index); + pr_debug("rtlx_open channel %d already opened\n", index); ret = -EBUSY; goto out_fail; } if (rtlx == NULL) { - if( (p = vpe_get_shared(tclimit)) == NULL) { - if (can_sleep) { - ret = __wait_event_interruptible( channel_wqs[index].lx_queue, - (p = vpe_get_shared(tclimit))); - if (ret) + p = vpe_get_shared(aprp_cpu_index()); + if (p == NULL) { + if (can_sleep) { + ret = __wait_event_interruptible( channel_wqs[index].lx_queue, + (p = vpe_get_shared(aprp_cpu_index()))); + if (ret) + goto out_fail; + } else { + pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n"); + ret = -ENOSYS; goto out_fail; - } else { - printk(KERN_DEBUG "No SP program loaded, and device " - "opened with O_NONBLOCK\n"); - ret = -ENOSYS; - goto out_fail; - } + } } smp_rmb(); @@ -204,24 +139,24 @@ int rtlx_open(int index, int can_sleep) ret = -ERESTARTSYS; goto out_fail; } -
finish_wait(&channel_wqs[index].lx_queue, &wait); + finish_wait(&channel_wqs[index].lx_queue, + &wait); } else { - pr_err(" *vpe_get_shared is NULL. " - "Has an SP program been loaded?\n"); + pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n"); ret = -ENOSYS; goto out_fail; } } if ((unsigned int)*p < KSEG0) { - printk(KERN_WARNING "vpe_get_shared returned an " - "invalid pointer maybe an error code %d\n", - (int)*p); + pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n", + (int)*p); ret = -ENOSYS; goto out_fail; } - if ((ret = rtlx_init(*p)) < 0) + ret = rtlx_init(*p); + if (ret < 0) goto out_ret; } @@ -352,7 +287,7 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count) size_t fl; if (rtlx == NULL) - return(-ENOSYS); + return -ENOSYS; rt = &rtlx->channel[index]; @@ -361,8 +296,8 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count) rt_read = rt->rt_read; /* total number of bytes to copy */ - count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write, - rt->buffer_size)); + count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write, + rt->buffer_size)); /* first bit from write pointer to the end of the buffer, or count */ fl = min(count, (size_t) rt->buffer_size - rt->rt_write); @@ -372,9 +307,8 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count) goto out; /* if there's any left copy to the beginning of the buffer */ - if (count - fl) { + if (count - fl) failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); - } out: count -= failed; @@ -384,6 +318,8 @@ out: smp_wmb(); mutex_unlock(&channel_wqs[index].mutex); + _interrupt_sp(); + return count; } @@ -398,7 +334,7 @@ static int file_release(struct inode *inode, struct file *filp) return rtlx_release(iminor(inode)); } -static unsigned int file_poll(struct file *file, poll_table * wait) +static unsigned int file_poll(struct file *file, poll_table *wait) { int minor = iminor(file_inode(file)); unsigned int mask = 0; @@ -420,21 +356,20 @@ static unsigned int file_poll(struct file *file, poll_table * wait) return mask; } -static ssize_t file_read(struct file *file, char __user * buffer, size_t count, - loff_t * ppos) +static ssize_t file_read(struct file *file, char __user *buffer, size_t count, + loff_t *ppos) { int minor = iminor(file_inode(file)); /* data available? */ - if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) { - return 0; // -EAGAIN makes cat whinge - } + if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 
0 : 1)) + return 0; /* -EAGAIN makes 'cat' whine */ return rtlx_read(minor, buffer, count); } -static ssize_t file_write(struct file *file, const char __user * buffer, - size_t count, loff_t * ppos) +static ssize_t file_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) { int minor = iminor(file_inode(file)); @@ -454,100 +389,16 @@ static ssize_t file_write(struct file *file, const char __user * buffer, return rtlx_write(minor, buffer, count); } -static const struct file_operations rtlx_fops = { +const struct file_operations rtlx_fops = { .owner = THIS_MODULE, - .open = file_open, + .open = file_open, .release = file_release, .write = file_write, - .read = file_read, - .poll = file_poll, + .read = file_read, + .poll = file_poll, .llseek = noop_llseek, }; -static struct irqaction rtlx_irq = { - .handler = rtlx_interrupt, - .name = "RTLX", -}; - -static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; - -static char register_chrdev_failed[] __initdata = - KERN_ERR "rtlx_module_init: unable to register device\n"; - -static int __init rtlx_module_init(void) -{ - struct device *dev; - int i, err; - - if (!cpu_has_mipsmt) { - printk("VPE loader: not a MIPS MT capable processor\n"); - return -ENODEV; - } - - if (tclimit == 0) { - printk(KERN_WARNING "No TCs reserved for AP/SP, not " - "initializing RTLX.\nPass maxtcs=<n> argument as kernel " - "argument\n"); - - return -ENODEV; - } - - major = register_chrdev(0, module_name, &rtlx_fops); - if (major < 0) { - printk(register_chrdev_failed); - return major; - } - - /* initialise the wait queues */ - for (i = 0; i < RTLX_CHANNELS; i++) { - init_waitqueue_head(&channel_wqs[i].rt_queue); - init_waitqueue_head(&channel_wqs[i].lx_queue); - atomic_set(&channel_wqs[i].in_open, 0); - mutex_init(&channel_wqs[i].mutex); - - dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, - "%s%d", module_name, i); - if (IS_ERR(dev)) { - err = PTR_ERR(dev); - goto out_chrdev; - } - } - - /* set up notifiers */ - notify.start = starting; - notify.stop = stopping; - vpe_notify(tclimit, ¬ify); - - if (cpu_has_vint) - set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); - else { - pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); - err = -ENODEV; - goto out_chrdev; - } - - rtlx_irq.dev_id = rtlx; - setup_irq(rtlx_irq_num, &rtlx_irq); - - return 0; - -out_chrdev: - for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); - - return err; -} - -static void __exit rtlx_module_exit(void) -{ - int i; - - for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); - - unregister_chrdev(major, module_name); -} - module_init(rtlx_module_init); module_exit(rtlx_module_exit); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index e8e541b40d86..3245474f19d5 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -6,6 +6,7 @@ * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org> * Copyright (C) 2001 MIPS Technologies, Inc. * Copyright (C) 2004 Thiemo Seufer + * Copyright (C) 2014 Imagination Technologies Ltd. 
*/ #include <linux/errno.h> #include <asm/asm.h> @@ -74,10 +75,10 @@ NESTED(handle_sys, PT_SIZE, sp) .set noreorder .set nomacro -1: lw t5, 16(t0) # argument #5 from usp -4: lw t6, 20(t0) # argument #6 from usp -3: lw t7, 24(t0) # argument #7 from usp -2: lw t8, 28(t0) # argument #8 from usp +1: user_lw(t5, 16(t0)) # argument #5 from usp +4: user_lw(t6, 20(t0)) # argument #6 from usp +3: user_lw(t7, 24(t0)) # argument #7 from usp +2: user_lw(t8, 28(t0)) # argument #8 from usp sw t5, 16(sp) # argument #5 to ksp sw t6, 20(sp) # argument #6 to ksp @@ -118,7 +119,18 @@ syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - jal syscall_trace_enter + + /* + * syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + */ + addiu a1, v0, __NR_O32_Linux + bnez v0, 1f /* __NR_syscall at offset 0 */ + lw a1, PT_R4(sp) + +1: jal syscall_trace_enter + + bltz v0, 2f # seccomp failed? Skip syscall move t0, s0 RESTORE_STATIC @@ -138,7 +150,7 @@ syscall_trace_entry: sw t1, PT_R0(sp) # save it for syscall restarting 1: sw v0, PT_R2(sp) # result - j syscall_exit +2: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -563,3 +575,6 @@ EXPORT(sys_call_table) PTR sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr /* 4350 */ + PTR sys_renameat2 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 57e3742fec59..be2fedd4ae33 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -80,8 +80,11 @@ syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp + daddiu a1, v0, __NR_64_Linux jal syscall_trace_enter + bltz v0, 2f # seccomp failed? Skip syscall + move t0, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers @@ -102,7 +105,7 @@ syscall_trace_entry: sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result - j syscall_exit +2: j syscall_exit illegal_syscall: /* This also isn't a 64-bit syscall, throw an error. */ @@ -425,4 +428,7 @@ EXPORT(sys_call_table) PTR sys_kcmp PTR sys_finit_module PTR sys_getdents64 + PTR sys_sched_setattr + PTR sys_sched_getattr /* 5310 */ + PTR sys_renameat2 .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 2f48f5934399..c1dbcda4b816 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -72,8 +72,11 @@ n32_syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp + daddiu a1, v0, __NR_N32_Linux jal syscall_trace_enter + bltz v0, 2f # seccomp failed? 
Skip syscall + move t0, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers @@ -94,7 +97,7 @@ n32_syscall_trace_entry: sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result - j syscall_exit +2: j syscall_exit not_n32_scall: /* This is not an n32 compatibility syscall, pass it on to @@ -418,4 +421,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_process_vm_writev /* 6310 */ PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr + PTR sys_renameat2 /* 6315 */ .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f1acdb429f4f..f1343ccd7ed7 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -112,7 +112,20 @@ trace_a_syscall: move s0, t2 # Save syscall pointer move a0, sp - jal syscall_trace_enter + /* + * syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + * note: NR_syscall is the first O32 syscall but the macro is + * only defined when compiling with -mabi=32 (CONFIG_32BIT) + * therefore __NR_O32_Linux is used (4000) + */ + addiu a1, v0, __NR_O32_Linux + bnez v0, 1f /* __NR_syscall at offset 0 */ + lw a1, PT_R4(sp) + +1: jal syscall_trace_enter + + bltz v0, 2f # seccomp failed? Skip syscall move t0, s0 RESTORE_STATIC @@ -136,7 +149,7 @@ trace_a_syscall: sd t1, PT_R0(sp) # save it for syscall restarting 1: sd v0, PT_R2(sp) # result - j syscall_exit +2: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -541,4 +554,7 @@ EXPORT(sys32_call_table) PTR compat_sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module + PTR sys_sched_setattr + PTR sys_sched_getattr /* 4350 */ + PTR sys_renameat2 .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c new file mode 100644 index 000000000000..076ead2a9859 --- /dev/null +++ b/arch/mips/kernel/segment.c @@ -0,0 +1,110 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Imagination Technologies Ltd. + */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <asm/cpu.h> +#include <asm/mipsregs.h> + +static void build_segment_config(char *str, unsigned int cfg) +{ + unsigned int am; + static const char * const am_str[] = { + "UK", "MK", "MSK", "MUSK", "MUSUK", "USK", + "RSRVD", "UUSK"}; + + /* Segment access mode. */ + am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT; + str += sprintf(str, "%-5s", am_str[am]); + + /* + * Access modes MK, MSK and MUSK are mapped segments. Therefore + * there is no direct physical address mapping. + */ + if ((am == 0) || (am > 3)) { + str += sprintf(str, " %03lx", + ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT)); + str += sprintf(str, " %01ld", + ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT)); + } else { + str += sprintf(str, " UND"); + str += sprintf(str, " U"); + } + + /* Exception configuration. 
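Each 32-bit SegCtl register packs two 16-bit segment configurations, which is why show_segments() below shifts segcfg right by 16 between prints. A hedged userspace decoder for one 16-bit configuration follows; the field positions (C in bits 0-2, EU in bit 3, AM in bits 4-6, PA in bits 9-15) are assumptions about the MIPS_SEGCFG_* constants rather than copies of them:

#include <stdio.h>

// Assumed field layout of one 16-bit segment configuration.
#define SEGCFG_C_SHIFT	0
#define SEGCFG_C	(0x7 << SEGCFG_C_SHIFT)
#define SEGCFG_EU_SHIFT	3
#define SEGCFG_EU	(0x1 << SEGCFG_EU_SHIFT)
#define SEGCFG_AM_SHIFT	4
#define SEGCFG_AM	(0x7 << SEGCFG_AM_SHIFT)
#define SEGCFG_PA_SHIFT	9
#define SEGCFG_PA	(0x7f << SEGCFG_PA_SHIFT)

static void decode(unsigned int cfg)
{
	static const char * const am[] = { "UK", "MK", "MSK", "MUSK",
					   "MUSUK", "USK", "RSRVD", "UUSK" };

	printf("AM=%-5s PA=%03x C=%u EU=%u\n",
	       am[(cfg & SEGCFG_AM) >> SEGCFG_AM_SHIFT],
	       (cfg & SEGCFG_PA) >> SEGCFG_PA_SHIFT,
	       (cfg & SEGCFG_C) >> SEGCFG_C_SHIFT,
	       (cfg & SEGCFG_EU) >> SEGCFG_EU_SHIFT);
}

int main(void)
{
	unsigned int segctl = 0x00200010;	// arbitrary example value

	decode(segctl & 0xffff);		// lower segment config
	decode(segctl >> 16);			// upper segment config
	return 0;
}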
*/ + str += sprintf(str, " %01ld\n", + ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT)); +} + +static int show_segments(struct seq_file *m, void *v) +{ + unsigned int segcfg; + char str[42]; + + seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n"); + seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n"); + + segcfg = read_c0_segctl0(); + build_segment_config(str, segcfg); + seq_printf(m, " 0 e0000000 512M %s", str); + + segcfg >>= 16; + build_segment_config(str, segcfg); + seq_printf(m, " 1 c0000000 512M %s", str); + + segcfg = read_c0_segctl1(); + build_segment_config(str, segcfg); + seq_printf(m, " 2 a0000000 512M %s", str); + + segcfg >>= 16; + build_segment_config(str, segcfg); + seq_printf(m, " 3 80000000 512M %s", str); + + segcfg = read_c0_segctl2(); + build_segment_config(str, segcfg); + seq_printf(m, " 4 40000000 1G %s", str); + + segcfg >>= 16; + build_segment_config(str, segcfg); + seq_printf(m, " 5 00000000 1G %s\n", str); + + return 0; +} + +static int segments_open(struct inode *inode, struct file *file) +{ + return single_open(file, show_segments, NULL); +} + +static const struct file_operations segments_fops = { + .open = segments_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init segments_info(void) +{ + extern struct dentry *mips_debugfs_dir; + struct dentry *segments; + + if (cpu_has_segments) { + if (!mips_debugfs_dir) + return -ENODEV; + + segments = debugfs_create_file("segments", S_IRUGO, + mips_debugfs_dir, NULL, + &segments_fops); + if (!segments) + return -ENOMEM; + } + return 0; +} + +device_initcall(segments_info); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 2f285abc76d5..9e60d117e41e 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -6,6 +6,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1994 - 2000 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2014, Imagination Technologies Ltd. */ #include <linux/cache.h> #include <linux/context_tracking.h> @@ -46,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc); extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); -extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); -extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); - struct sigframe { u32 sf_ass[4]; /* argument save space for o32 */ u32 sf_pad[2]; /* Was: signal trampoline */ @@ -64,16 +62,55 @@ struct rt_sigframe { }; /* + * Thread saved context copy to/from a signal context presumed to be on the + * user stack, and therefore accessed with appropriate macros from uaccess.h. 
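The copy helpers that follow accumulate __put_user()/__get_user() results with |= and report once at the end instead of bailing out on the first fault; any nonzero bit marks the whole sigcontext transfer as failed, after which the caller touches the page and retries. The same accumulation pattern with the uaccess call replaced by a stub:

#include <stdio.h>

// Userspace stand-in for __put_user(): 0 on success, -14 (-EFAULT) on
// a simulated fault. Here the third slot is made to fault.
static int put_user_stub(unsigned long val, unsigned long *dst, int fault)
{
	if (fault)
		return -14;
	*dst = val;
	return 0;
}

int main(void)
{
	unsigned long dst[4];
	int err = 0;
	int i;

	for (i = 0; i < 4; i++)
		err |= put_user_stub(i, &dst[i], i == 2);

	printf("err=%d (nonzero: the whole copy is considered failed)\n", err);
	return 0;
}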
+ */ +static int copy_fp_to_sigcontext(struct sigcontext __user *sc) +{ + int i; + int err = 0; + + for (i = 0; i < NUM_FPU_REGS; i++) { + err |= + __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0), + &sc->sc_fpregs[i]); + } + err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr); + + return err; +} + +static int copy_fp_from_sigcontext(struct sigcontext __user *sc) +{ + int i; + int err = 0; + u64 fpr_val; + + for (i = 0; i < NUM_FPU_REGS; i++) { + err |= __get_user(fpr_val, &sc->sc_fpregs[i]); + set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val); + } + err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr); + + return err; +} + +/* * Helper routines */ static int protected_save_fp_context(struct sigcontext __user *sc) { int err; +#ifndef CONFIG_EVA while (1) { lock_fpu_owner(); - own_fpu_inatomic(1); - err = save_fp_context(sc); /* this might fail */ - unlock_fpu_owner(); + if (is_fpu_owner()) { + err = save_fp_context(sc); + unlock_fpu_owner(); + } else { + unlock_fpu_owner(); + err = copy_fp_to_sigcontext(sc); + } if (likely(!err)) break; /* touch the sigcontext and try again */ @@ -83,17 +120,30 @@ static int protected_save_fp_context(struct sigcontext __user *sc) if (err) break; /* really bad sigcontext */ } +#else + /* + * EVA does not have FPU EVA instructions so saving fpu context directly + * does not work. + */ + lose_fpu(1); + err = save_fp_context(sc); /* this might fail */ +#endif return err; } static int protected_restore_fp_context(struct sigcontext __user *sc) { int err, tmp __maybe_unused; +#ifndef CONFIG_EVA while (1) { lock_fpu_owner(); - own_fpu_inatomic(0); - err = restore_fp_context(sc); /* this might fail */ - unlock_fpu_owner(); + if (is_fpu_owner()) { + err = restore_fp_context(sc); + unlock_fpu_owner(); + } else { + unlock_fpu_owner(); + err = copy_fp_from_sigcontext(sc); + } if (likely(!err)) break; /* touch the sigcontext and try again */ @@ -103,6 +153,14 @@ static int protected_restore_fp_context(struct sigcontext __user *sc) if (err) break; /* really bad sigcontext */ } +#else + /* + * EVA does not have FPU EVA instructions so restoring fpu context + * directly does not work. + */ + lose_fpu(0); + err = restore_fp_context(sc); /* this might fail */ +#endif return err; } @@ -589,23 +647,26 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, } #ifdef CONFIG_SMP +#ifndef CONFIG_EVA static int smp_save_fp_context(struct sigcontext __user *sc) { return raw_cpu_has_fpu ? _save_fp_context(sc) - : fpu_emulator_save_context(sc); + : copy_fp_to_sigcontext(sc); } static int smp_restore_fp_context(struct sigcontext __user *sc) { return raw_cpu_has_fpu ?
_restore_fp_context(sc) - : fpu_emulator_restore_context(sc); + : copy_fp_from_sigcontext(sc); } +#endif /* CONFIG_EVA */ #endif static int signal_setup(void) { +#ifndef CONFIG_EVA #ifdef CONFIG_SMP /* For now just do the cpu_has_fpu check when the functions are invoked */ save_fp_context = smp_save_fp_context; @@ -615,9 +676,13 @@ static int signal_setup(void) save_fp_context = _save_fp_context; restore_fp_context = _restore_fp_context; } else { - save_fp_context = fpu_emulator_save_context; - restore_fp_context = fpu_emulator_restore_context; + save_fp_context = copy_fp_to_sigcontext; + restore_fp_context = copy_fp_from_sigcontext; } +#endif /* CONFIG_SMP */ +#else + save_fp_context = copy_fp_to_sigcontext; + restore_fp_context = copy_fp_from_sigcontext; #endif return 0; } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 1905a419aa46..bae2e6ee2109 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -42,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc); extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); -extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc); -extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc); - /* * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... */ @@ -78,6 +75,42 @@ struct rt_sigframe32 { }; /* + * Thread saved context copy to/from a signal context presumed to be on the + * user stack, and therefore accessed with appropriate macros from uaccess.h. + */ +static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc) +{ + int i; + int err = 0; + int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1; + + for (i = 0; i < NUM_FPU_REGS; i += inc) { + err |= + __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0), + &sc->sc_fpregs[i]); + } + err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr); + + return err; +} + +static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc) +{ + int i; + int err = 0; + int inc = test_thread_flag(TIF_32BIT_FPREGS) ?
2 : 1; + u64 fpr_val; + + for (i = 0; i < NUM_FPU_REGS; i += inc) { + err |= __get_user(fpr_val, &sc->sc_fpregs[i]); + set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val); + } + err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr); + + return err; +} + +/* * sigcontext handlers */ static int protected_save_fp_context32(struct sigcontext32 __user *sc) @@ -85,9 +118,13 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) int err; while (1) { lock_fpu_owner(); - own_fpu_inatomic(1); - err = save_fp_context32(sc); /* this might fail */ - unlock_fpu_owner(); + if (is_fpu_owner()) { + err = save_fp_context32(sc); + unlock_fpu_owner(); + } else { + unlock_fpu_owner(); + err = copy_fp_to_sigcontext32(sc); + } if (likely(!err)) break; /* touch the sigcontext and try again */ @@ -105,9 +142,13 @@ static int protected_restore_fp_context32(struct sigcontext32 __user *sc) int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); - own_fpu_inatomic(0); - err = restore_fp_context32(sc); /* this might fail */ - unlock_fpu_owner(); + if (is_fpu_owner()) { + err = restore_fp_context32(sc); + unlock_fpu_owner(); + } else { + unlock_fpu_owner(); + err = copy_fp_from_sigcontext32(sc); + } if (likely(!err)) break; /* touch the sigcontext and try again */ @@ -564,8 +605,8 @@ static int signal32_init(void) save_fp_context32 = _save_fp_context32; restore_fp_context32 = _restore_fp_context32; } else { - save_fp_context32 = fpu_emulator_save_context32; - restore_fp_context32 = fpu_emulator_restore_context32; + save_fp_context32 = copy_fp_to_sigcontext32; + restore_fp_context32 = copy_fp_from_sigcontext32; } return 0; diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 2362665ba496..df9e2bd9b2c2 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -49,8 +49,10 @@ cpumask_t bmips_booted_mask; unsigned long bmips_smp_boot_sp; unsigned long bmips_smp_boot_gp; -static void bmips_send_ipi_single(int cpu, unsigned int action); -static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); +static void bmips43xx_send_ipi_single(int cpu, unsigned int action); +static void bmips5000_send_ipi_single(int cpu, unsigned int action); +static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id); +static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id); /* SW interrupts 0,1 are used for interprocessor signaling */ #define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0) @@ -64,49 +66,58 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); static void __init bmips_smp_setup(void) { int i, cpu = 1, boot_cpu = 0; - -#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) int cpu_hw_intr; - /* arbitration priority */ - clear_c0_brcm_cmt_ctrl(0x30); - - /* NBK and weak order flags */ - set_c0_brcm_config_0(0x30000); - - /* Find out if we are running on TP0 or TP1 */ - boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); - - /* - * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread - * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output - * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output - */ - if (boot_cpu == 0) - cpu_hw_intr = 0x02; - else - cpu_hw_intr = 0x1d; - - change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15)); - - /* single core, 2 threads (2 pipelines) */ - max_cpus = 2; -#elif defined(CONFIG_CPU_BMIPS5000) - /* enable raceless SW interrupts */ - set_c0_brcm_config(0x03 << 22); - - /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ - change_c0_brcm_mode(0x1f << 27,
0x02 << 27); - - /* N cores, 2 threads per core */ - max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + /* arbitration priority */ + clear_c0_brcm_cmt_ctrl(0x30); + + /* NBK and weak order flags */ + set_c0_brcm_config_0(0x30000); + + /* Find out if we are running on TP0 or TP1 */ + boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); + + /* + * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other + * thread + * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output + * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output + */ + if (boot_cpu == 0) + cpu_hw_intr = 0x02; + else + cpu_hw_intr = 0x1d; + + change_c0_brcm_cmt_intr(0xf8018000, + (cpu_hw_intr << 27) | (0x03 << 15)); + + /* single core, 2 threads (2 pipelines) */ + max_cpus = 2; + + break; + case CPU_BMIPS5000: + /* enable raceless SW interrupts */ + set_c0_brcm_config(0x03 << 22); + + /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ + change_c0_brcm_mode(0x1f << 27, 0x02 << 27); + + /* N cores, 2 threads per core */ + max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; + + /* clear any pending SW interrupts */ + for (i = 0; i < max_cpus; i++) { + write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); + write_c0_brcm_action(ACTION_CLR_IPI(i, 1)); + } - /* clear any pending SW interrupts */ - for (i = 0; i < max_cpus; i++) { - write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); - write_c0_brcm_action(ACTION_CLR_IPI(i, 1)); + break; + default: + max_cpus = 1; } -#endif if (!bmips_smp_enabled) max_cpus = 1; @@ -134,6 +145,20 @@ static void __init bmips_smp_setup(void) */ static void bmips_prepare_cpus(unsigned int max_cpus) { + irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id); + + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + bmips_ipi_interrupt = bmips43xx_ipi_interrupt; + break; + case CPU_BMIPS5000: + bmips_ipi_interrupt = bmips5000_ipi_interrupt; + break; + default: + return; + } + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, "smp_ipi0", NULL)) panic("Can't request IPI0 interrupt"); @@ -168,26 +193,39 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle) pr_info("SMP: Booting CPU%d...\n", cpu); - if (cpumask_test_cpu(cpu, &bmips_booted_mask)) - bmips_send_ipi_single(cpu, 0); + if (cpumask_test_cpu(cpu, &bmips_booted_mask)) { + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + bmips43xx_send_ipi_single(cpu, 0); + break; + case CPU_BMIPS5000: + bmips5000_send_ipi_single(cpu, 0); + break; + } + } else { -#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) - /* Reset slave TP1 if booting from TP0 */ - if (cpu_logical_map(cpu) == 1) - set_c0_brcm_cmt_ctrl(0x01); -#elif defined(CONFIG_CPU_BMIPS5000) - if (cpu & 0x01) - write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); - else { - /* - * core N thread 0 was already booted; just - * pulse the NMI line - */ - bmips_write_zscm_reg(0x210, 0xc0000000); - udelay(10); - bmips_write_zscm_reg(0x210, 0x00); + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + /* Reset slave TP1 if booting from TP0 */ + if (cpu_logical_map(cpu) == 1) + set_c0_brcm_cmt_ctrl(0x01); + break; + case CPU_BMIPS5000: + if (cpu & 0x01) + write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); + else { + /* + * core N thread 0 was already booted; just + * pulse the NMI line + */ + bmips_write_zscm_reg(0x210, 0xc0000000); + udelay(10); + bmips_write_zscm_reg(0x210, 0x00); + } + break; } -#endif 
cpumask_set_cpu(cpu, &bmips_booted_mask); } } @@ -199,26 +237,32 @@ static void bmips_init_secondary(void) { /* move NMI vector to kseg0, in case XKS01 is enabled */ -#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) - void __iomem *cbr = BMIPS_GET_CBR(); + void __iomem *cbr; unsigned long old_vec; unsigned long relo_vector; int boot_cpu; - boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); - relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 : - BMIPS_RELO_VECTOR_CONTROL_1; + switch (current_cpu_type()) { + case CPU_BMIPS4350: + case CPU_BMIPS4380: + cbr = BMIPS_GET_CBR(); - old_vec = __raw_readl(cbr + relo_vector); - __raw_writel(old_vec & ~0x20000000, cbr + relo_vector); + boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); + relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 : + BMIPS_RELO_VECTOR_CONTROL_1; - clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); -#elif defined(CONFIG_CPU_BMIPS5000) - write_c0_brcm_bootvec(read_c0_brcm_bootvec() & - (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); + old_vec = __raw_readl(cbr + relo_vector); + __raw_writel(old_vec & ~0x20000000, cbr + relo_vector); - write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); -#endif + clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); + break; + case CPU_BMIPS5000: + write_c0_brcm_bootvec(read_c0_brcm_bootvec() & + (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000)); + + write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); + break; + } } /* @@ -237,15 +281,6 @@ static void bmips_smp_finish(void) } /* - * Runs on CPU0 after all CPUs have been booted - */ -static void bmips_cpus_done(void) -{ -} - -#if defined(CONFIG_CPU_BMIPS5000) - -/* * BMIPS5000 raceless IPIs * * Each CPU has two inbound SW IRQs which are independent of all other CPUs. 
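/*
 * Aside, not part of the patch: "raceless" means each sender writes the
 * target CPU's private inbound IRQ directly via write_c0_brcm_action(),
 * so no shared state needs locking -- contrast the BMIPS43xx path below,
 * which serializes a shared ipi_action_mask under ipi_lock. A caller
 * would simply do, e.g.:
 *
 *	bmips5000_send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
 */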
@@ -253,12 +288,12 @@ static void bmips_cpus_done(void) * IPI1 is used for SMP_CALL_FUNCTION */ -static void bmips_send_ipi_single(int cpu, unsigned int action) +static void bmips5000_send_ipi_single(int cpu, unsigned int action) { write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION)); } -static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) +static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id) { int action = irq - IPI0_IRQ; @@ -272,7 +307,14 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#else +static void bmips5000_send_ipi_mask(const struct cpumask *mask, + unsigned int action) +{ + unsigned int i; + + for_each_cpu(i, mask) + bmips5000_send_ipi_single(i, action); +} /* * BMIPS43xx racey IPIs @@ -287,7 +329,7 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) static DEFINE_SPINLOCK(ipi_lock); static DEFINE_PER_CPU(int, ipi_action_mask); -static void bmips_send_ipi_single(int cpu, unsigned int action) +static void bmips43xx_send_ipi_single(int cpu, unsigned int action) { unsigned long flags; @@ -298,7 +340,7 @@ static void bmips_send_ipi_single(int cpu, unsigned int action) spin_unlock_irqrestore(&ipi_lock, flags); } -static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) +static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) { unsigned long flags; int action, cpu = irq - IPI0_IRQ; @@ -317,15 +359,13 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#endif /* BMIPS type */ - -static void bmips_send_ipi_mask(const struct cpumask *mask, +static void bmips43xx_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; for_each_cpu(i, mask) - bmips_send_ipi_single(i, action); + bmips43xx_send_ipi_single(i, action); } #ifdef CONFIG_HOTPLUG_CPU @@ -381,15 +421,28 @@ void __ref play_dead(void) #endif /* CONFIG_HOTPLUG_CPU */ -struct plat_smp_ops bmips_smp_ops = { +struct plat_smp_ops bmips43xx_smp_ops = { + .smp_setup = bmips_smp_setup, + .prepare_cpus = bmips_prepare_cpus, + .boot_secondary = bmips_boot_secondary, + .smp_finish = bmips_smp_finish, + .init_secondary = bmips_init_secondary, + .send_ipi_single = bmips43xx_send_ipi_single, + .send_ipi_mask = bmips43xx_send_ipi_mask, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = bmips_cpu_disable, + .cpu_die = bmips_cpu_die, +#endif +}; + +struct plat_smp_ops bmips5000_smp_ops = { .smp_setup = bmips_smp_setup, .prepare_cpus = bmips_prepare_cpus, .boot_secondary = bmips_boot_secondary, .smp_finish = bmips_smp_finish, .init_secondary = bmips_init_secondary, - .cpus_done = bmips_cpus_done, - .send_ipi_single = bmips_send_ipi_single, - .send_ipi_mask = bmips_send_ipi_mask, + .send_ipi_single = bmips5000_send_ipi_single, + .send_ipi_mask = bmips5000_send_ipi_mask, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = bmips_cpu_disable, .cpu_die = bmips_cpu_die, @@ -427,43 +480,47 @@ void bmips_ebase_setup(void) BUG_ON(ebase != CKSEG0); -#if defined(CONFIG_CPU_BMIPS4350) - /* - * BMIPS4350 cannot relocate the normal vectors, but it - * can relocate the BEV=1 vectors. So CPU1 starts up at - * the relocated BEV=1, IV=0 general exception vector @ - * 0xa000_0380. 
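/*
 * Aside, not part of the original comment: 0xa000_0380 is the kseg1
 * (uncached) alias of physical address 0x380, the offset of the BEV=1,
 * IV=0 general exception vector; the BMIPS_WARM_RESTART_VEC - CKSEG0
 * subtraction below strips the kseg0 base to yield the physical offset
 * handed to set_uncached_handler().
 */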
- * - * set_uncached_handler() is used here because: - * - CPU1 will run this from uncached space - * - None of the cacheflush functions are set up yet - */ - set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0, - &bmips_smp_int_vec, 0x80); - __sync(); - return; -#elif defined(CONFIG_CPU_BMIPS4380) - /* - * 0x8000_0000: reset/NMI (initially in kseg1) - * 0x8000_0400: normal vectors - */ - new_ebase = 0x80000400; - cbr = BMIPS_GET_CBR(); - __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); - __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); -#elif defined(CONFIG_CPU_BMIPS5000) - /* - * 0x8000_0000: reset/NMI (initially in kseg1) - * 0x8000_1000: normal vectors - */ - new_ebase = 0x80001000; - write_c0_brcm_bootvec(0xa0088008); - write_c0_ebase(new_ebase); - if (max_cpus > 2) - bmips_write_zscm_reg(0xa0, 0xa008a008); -#else - return; -#endif + switch (current_cpu_type()) { + case CPU_BMIPS4350: + /* + * BMIPS4350 cannot relocate the normal vectors, but it + * can relocate the BEV=1 vectors. So CPU1 starts up at + * the relocated BEV=1, IV=0 general exception vector @ + * 0xa000_0380. + * + * set_uncached_handler() is used here because: + * - CPU1 will run this from uncached space + * - None of the cacheflush functions are set up yet + */ + set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0, + &bmips_smp_int_vec, 0x80); + __sync(); + return; + case CPU_BMIPS4380: + /* + * 0x8000_0000: reset/NMI (initially in kseg1) + * 0x8000_0400: normal vectors + */ + new_ebase = 0x80000400; + cbr = BMIPS_GET_CBR(); + __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); + __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); + break; + case CPU_BMIPS5000: + /* + * 0x8000_0000: reset/NMI (initially in kseg1) + * 0x8000_1000: normal vectors + */ + new_ebase = 0x80001000; + write_c0_brcm_bootvec(0xa0088008); + write_c0_ebase(new_ebase); + if (max_cpus > 2) + bmips_write_zscm_reg(0xa0, 0xa008a008); + break; + default: + return; + } + board_nmi_handler_setup = &bmips_nmi_handler_setup; ebase = new_ebase; } diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 5969f1e9b62a..fc8a51553426 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -39,57 +39,9 @@ #include <asm/amon.h> #include <asm/gic.h> -static void ipi_call_function(unsigned int cpu) -{ - pr_debug("CPU%d: %s cpu %d status %08x\n", - smp_processor_id(), __func__, cpu, read_c0_status()); - - gic_send_ipi(plat_ipi_call_int_xlate(cpu)); -} - - -static void ipi_resched(unsigned int cpu) -{ - pr_debug("CPU%d: %s cpu %d status %08x\n", - smp_processor_id(), __func__, cpu, read_c0_status()); - - gic_send_ipi(plat_ipi_resched_int_xlate(cpu)); -} - -/* - * FIXME: This isn't restricted to CMP - * The SMVP kernel could use GIC interrupts if available - */ -void cmp_send_ipi_single(int cpu, unsigned int action) -{ - unsigned long flags; - - local_irq_save(flags); - - switch (action) { - case SMP_CALL_FUNCTION: - ipi_call_function(cpu); - break; - - case SMP_RESCHEDULE_YOURSELF: - ipi_resched(cpu); - break; - } - - local_irq_restore(flags); -} - -static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) -{ - unsigned int i; - - for_each_cpu(i, mask) - cmp_send_ipi_single(i, action); -} - static void cmp_init_secondary(void) { - struct cpuinfo_mips *c = &current_cpu_data; + struct cpuinfo_mips *c __maybe_unused = &current_cpu_data; /* Assume GIC is present */ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | @@ -97,15 +49,11 @@ static void
cmp_init_secondary(void) /* Enable per-cpu interrupts: platform specific */ - c->core = (read_c0_ebase() >> 1) & 0x1ff; -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) +#ifdef CONFIG_MIPS_MT_SMP if (cpu_has_mipsmt) c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; #endif -#ifdef CONFIG_MIPS_MT_SMTC - c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; -#endif } static void cmp_smp_finish(void) @@ -124,11 +72,6 @@ static void cmp_smp_finish(void) local_irq_enable(); } -static void cmp_cpus_done(void) -{ - pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); -} - /* * Setup the PC, SP, and GP of a secondary processor and start it running * smp_bootstrap is the place to resume from @@ -184,10 +127,6 @@ void __init cmp_smp_setup(void) unsigned int mvpconf0 = read_c0_mvpconf0(); nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; -#elif defined(CONFIG_MIPS_MT_SMTC) - unsigned int mvpconf0 = read_c0_mvpconf0(); - - nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; #endif smp_num_siblings = nvpe; } @@ -199,19 +138,21 @@ void __init cmp_prepare_cpus(unsigned int max_cpus) pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", smp_processor_id(), __func__, max_cpus); +#ifdef CONFIG_MIPS_MT /* * FIXME: some of these options are per-system, some per-core and * some per-cpu */ mips_mt_set_cpuoptions(); +#endif + } struct plat_smp_ops cmp_smp_ops = { - .send_ipi_single = cmp_send_ipi_single, - .send_ipi_mask = cmp_send_ipi_mask, + .send_ipi_single = gic_send_ipi_single, + .send_ipi_mask = gic_send_ipi_mask, .init_secondary = cmp_init_secondary, .smp_finish = cmp_smp_finish, - .cpus_done = cmp_cpus_done, .boot_secondary = cmp_boot_secondary, .smp_setup = cmp_smp_setup, .prepare_cpus = cmp_prepare_cpus, diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c new file mode 100644 index 000000000000..949f2c6827a0 --- /dev/null +++ b/arch/mips/kernel/smp-cps.c @@ -0,0 +1,466 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/io.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/types.h> + +#include <asm/cacheflush.h> +#include <asm/gic.h> +#include <asm/mips-cm.h> +#include <asm/mips-cpc.h> +#include <asm/mips_mt.h> +#include <asm/mipsregs.h> +#include <asm/pm-cps.h> +#include <asm/smp-cps.h> +#include <asm/time.h> +#include <asm/uasm.h> + +static DECLARE_BITMAP(core_power, NR_CPUS); + +struct core_boot_config *mips_cps_core_bootcfg; + +static unsigned core_vpe_count(unsigned core) +{ + unsigned cfg; + + if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) + return 1; + + write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); + cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; + return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; +} + +static void __init cps_smp_setup(void) +{ + unsigned int ncores, nvpes, core_vpes; + int c, v; + + /* Detect & record VPE topology */ + ncores = mips_cm_numcores(); + pr_info("VPE topology "); + for (c = nvpes = 0; c < ncores; c++) { + core_vpes = core_vpe_count(c); + pr_cont("%c%u", c ? 
',' : '{', core_vpes); + + /* Use the number of VPEs in core 0 for smp_num_siblings */ + if (!c) + smp_num_siblings = core_vpes; + + for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { + cpu_data[nvpes + v].core = c; +#ifdef CONFIG_MIPS_MT_SMP + cpu_data[nvpes + v].vpe_id = v; +#endif + } + + nvpes += core_vpes; + } + pr_cont("} total %u\n", nvpes); + + /* Indicate present CPUs (CPU being synonymous with VPE) */ + for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) { + set_cpu_possible(v, true); + set_cpu_present(v, true); + __cpu_number_map[v] = v; + __cpu_logical_map[v] = v; + } + + /* Set a coherent default CCA (CWB) */ + change_c0_config(CONF_CM_CMASK, 0x5); + + /* Core 0 is powered up (we're running on it) */ + bitmap_set(core_power, 0, 1); + + /* Initialise core 0 */ + mips_cps_core_init(); + + /* Make core 0 coherent with everything */ + write_gcr_cl_coherence(0xff); +} + +static void __init cps_prepare_cpus(unsigned int max_cpus) +{ + unsigned ncores, core_vpes, c, cca; + bool cca_unsuitable; + u32 *entry_code; + + mips_mt_set_cpuoptions(); + + /* Detect whether the CCA is unsuited to multi-core SMP */ + cca = read_c0_config() & CONF_CM_CMASK; + switch (cca) { + case 0x4: /* CWBE */ + case 0x5: /* CWB */ + /* The CCA is coherent, multi-core is fine */ + cca_unsuitable = false; + break; + + default: + /* CCA is not coherent, multi-core is not usable */ + cca_unsuitable = true; + } + + /* Warn the user if the CCA prevents multi-core */ + ncores = mips_cm_numcores(); + if (cca_unsuitable && ncores > 1) { + pr_warn("Using only one core due to unsuitable CCA 0x%x\n", + cca); + + for_each_present_cpu(c) { + if (cpu_data[c].core) + set_cpu_present(c, false); + } + } + + /* + * Patch the start of mips_cps_core_entry to provide: + * + * v0 = CM base address + * s0 = kseg0 CCA + */ + entry_code = (u32 *)&mips_cps_core_entry; + UASM_i_LA(&entry_code, 3, (long)mips_cm_base); + uasm_i_addiu(&entry_code, 16, 0, cca); + dma_cache_wback_inv((unsigned long)&mips_cps_core_entry, + (void *)entry_code - (void *)&mips_cps_core_entry); + + /* Allocate core boot configuration structs */ + mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), + GFP_KERNEL); + if (!mips_cps_core_bootcfg) { + pr_err("Failed to allocate boot config for %u cores\n", ncores); + goto err_out; + } + + /* Allocate VPE boot configuration structs */ + for (c = 0; c < ncores; c++) { + core_vpes = core_vpe_count(c); + mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, + sizeof(*mips_cps_core_bootcfg[c].vpe_config), + GFP_KERNEL); + if (!mips_cps_core_bootcfg[c].vpe_config) { + pr_err("Failed to allocate %u VPE boot configs\n", + core_vpes); + goto err_out; + } + } + + /* Mark this CPU as booted */ + atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask, + 1 << cpu_vpe_id(&current_cpu_data)); + + return; +err_out: + /* Clean up allocations */ + if (mips_cps_core_bootcfg) { + for (c = 0; c < ncores; c++) + kfree(mips_cps_core_bootcfg[c].vpe_config); + kfree(mips_cps_core_bootcfg); + mips_cps_core_bootcfg = NULL; + } + + /* Effectively disable SMP by declaring CPUs not present */ + for_each_possible_cpu(c) { + if (c == 0) + continue; + set_cpu_present(c, false); + } +} + +static void boot_core(unsigned core) +{ + u32 access; + + /* Select the appropriate core */ + write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); + + /* Set its reset vector */ + write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); + + /* Ensure its coherency is disabled */ + write_gcr_co_coherence(0);
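/*
 * Sketch, not from the patch, of the CM "other core" register-window
 * idiom used in core_vpe_count() and again just above: point the window
 * at core N, after which the *_co_* accessors operate on that core's
 * GCRs. The helper name here is hypothetical:
 *
 *	static u32 read_other_core_config(unsigned int core)
 *	{
 *		write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
 *		return read_gcr_co_config();
 *	}
 */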
+ + /* Ensure the core can access the GCRs */ + access = read_gcr_access(); + access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core); + write_gcr_access(access); + + if (mips_cpc_present()) { + /* Reset the core */ + mips_cpc_lock_other(core); + write_cpc_co_cmd(CPC_Cx_CMD_RESET); + mips_cpc_unlock_other(); + } else { + /* Take the core out of reset */ + write_gcr_co_reset_release(0); + } + + /* The core is now powered up */ + bitmap_set(core_power, core, 1); +} + +static void remote_vpe_boot(void *dummy) +{ + mips_cps_boot_vpes(); +} + +static void cps_boot_secondary(int cpu, struct task_struct *idle) +{ + unsigned core = cpu_data[cpu].core; + unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); + struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; + struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; + unsigned int remote; + int err; + + vpe_cfg->pc = (unsigned long)&smp_bootstrap; + vpe_cfg->sp = __KSTK_TOS(idle); + vpe_cfg->gp = (unsigned long)task_thread_info(idle); + + atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); + + preempt_disable(); + + if (!test_bit(core, core_power)) { + /* Boot a VPE on a powered down core */ + boot_core(core); + goto out; + } + + if (core != current_cpu_data.core) { + /* Boot a VPE on another powered up core */ + for (remote = 0; remote < NR_CPUS; remote++) { + if (cpu_data[remote].core != core) + continue; + if (cpu_online(remote)) + break; + } + BUG_ON(remote >= NR_CPUS); + + err = smp_call_function_single(remote, remote_vpe_boot, + NULL, 1); + if (err) + panic("Failed to call remote CPU\n"); + goto out; + } + + BUG_ON(!cpu_has_mipsmt); + + /* Boot a VPE on this core */ + mips_cps_boot_vpes(); +out: + preempt_enable(); +} + +static void cps_init_secondary(void) +{ + /* Disable MT - we only want to run 1 TC per VPE */ + if (cpu_has_mipsmt) + dmt(); + + change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | + STATUSF_IP6 | STATUSF_IP7); +} + +static void cps_smp_finish(void) +{ + write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); + +#ifdef CONFIG_MIPS_MT_FPAFF + /* If we have an FPU, enroll ourselves in the FPU-full mask */ + if (cpu_has_fpu) + cpu_set(smp_processor_id(), mt_fpu_cpumask); +#endif /* CONFIG_MIPS_MT_FPAFF */ + + local_irq_enable(); +} + +#ifdef CONFIG_HOTPLUG_CPU + +static int cps_cpu_disable(void) +{ + unsigned cpu = smp_processor_id(); + struct core_boot_config *core_cfg; + + if (!cpu) + return -EBUSY; + + if (!cps_pm_support_state(CPS_PM_POWER_GATED)) + return -EINVAL; + + core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; + atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); + smp_mb__after_atomic(); + set_cpu_online(cpu, false); + cpu_clear(cpu, cpu_callin_map); + + return 0; +} + +static DECLARE_COMPLETION(cpu_death_chosen); +static unsigned cpu_death_sibling; +static enum { + CPU_DEATH_HALT, + CPU_DEATH_POWER, +} cpu_death; + +void play_dead(void) +{ + unsigned cpu, core; + + local_irq_disable(); + idle_task_exit(); + cpu = smp_processor_id(); + cpu_death = CPU_DEATH_POWER; + + if (cpu_has_mipsmt) { + core = cpu_data[cpu].core; + + /* Look for another online VPE within the core */ + for_each_online_cpu(cpu_death_sibling) { + if (cpu_data[cpu_death_sibling].core != core) + continue; + + /* + * There is an online VPE within the core. Just halt + * this TC and leave the core alone.
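/*
 * Aside, not part of the original comment: CPU_DEATH_HALT merely parks
 * this TC with write_c0_tchalt(TCHALT_H), leaving the core up for its
 * sibling VPE, while CPU_DEATH_POWER gates the whole core through
 * cps_pm_enter_state(CPS_PM_POWER_GATED), as the code below shows.
 */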
+ */ + cpu_death = CPU_DEATH_HALT; + break; + } + } + + /* This CPU has chosen its way out */ + complete(&cpu_death_chosen); + + if (cpu_death == CPU_DEATH_HALT) { + /* Halt this TC */ + write_c0_tchalt(TCHALT_H); + instruction_hazard(); + } else { + /* Power down the core */ + cps_pm_enter_state(CPS_PM_POWER_GATED); + } + + /* This should never be reached */ + panic("Failed to offline CPU %u", cpu); +} + +static void wait_for_sibling_halt(void *ptr_cpu) +{ + unsigned cpu = (unsigned)ptr_cpu; + unsigned vpe_id = cpu_data[cpu].vpe_id; + unsigned halted; + unsigned long flags; + + do { + local_irq_save(flags); + settc(vpe_id); + halted = read_tc_c0_tchalt(); + local_irq_restore(flags); + } while (!(halted & TCHALT_H)); +} + +static void cps_cpu_die(unsigned int cpu) +{ + unsigned core = cpu_data[cpu].core; + unsigned stat; + int err; + + /* Wait for the cpu to choose its way out */ + if (!wait_for_completion_timeout(&cpu_death_chosen, + msecs_to_jiffies(5000))) { + pr_err("CPU%u: didn't offline\n", cpu); + return; + } + + /* + * Now wait for the CPU to actually offline. Without doing this that + * offlining may race with one or more of: + * + * - Onlining the CPU again. + * - Powering down the core if another VPE within it is offlined. + * - A sibling VPE entering a non-coherent state. + * + * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing + * with which we could race, so do nothing. + */ + if (cpu_death == CPU_DEATH_POWER) { + /* + * Wait for the core to enter a powered down or clock gated + * state, the latter happening when a JTAG probe is connected + * in which case the CPC will refuse to power down the core. + */ + do { + mips_cpc_lock_other(core); + stat = read_cpc_co_stat_conf(); + stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; + mips_cpc_unlock_other(); + } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && + stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && + stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); + + /* Indicate the core is powered off */ + bitmap_clear(core_power, core, 1); + } else if (cpu_has_mipsmt) { + /* + * Have a CPU with access to the offlined CPUs registers wait + * for its TC to halt. 
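/*
 * Aside, not part of the original comment: a TC's registers are only
 * reachable from within its own core, via settc() followed by
 * read_tc_c0_tchalt() as in wait_for_sibling_halt() above -- hence the
 * smp_call_function_single() to cpu_death_sibling below, rather than
 * polling directly from the CPU running cps_cpu_die().
 */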
+ */ + err = smp_call_function_single(cpu_death_sibling, + wait_for_sibling_halt, + (void *)cpu, 1); + if (err) + panic("Failed to call remote sibling CPU\n"); + } +} + +#endif /* CONFIG_HOTPLUG_CPU */ + +static struct plat_smp_ops cps_smp_ops = { + .smp_setup = cps_smp_setup, + .prepare_cpus = cps_prepare_cpus, + .boot_secondary = cps_boot_secondary, + .init_secondary = cps_init_secondary, + .smp_finish = cps_smp_finish, + .send_ipi_single = gic_send_ipi_single, + .send_ipi_mask = gic_send_ipi_mask, +#ifdef CONFIG_HOTPLUG_CPU + .cpu_disable = cps_cpu_disable, + .cpu_die = cps_cpu_die, +#endif +}; + +bool mips_cps_smp_in_use(void) +{ + extern struct plat_smp_ops *mp_ops; + return mp_ops == &cps_smp_ops; +} + +int register_cps_smp_ops(void) +{ + if (!mips_cm_present()) { + pr_warn("MIPS CPS SMP unable to proceed without a CM\n"); + return -ENODEV; + } + + /* check we have a GIC - we need one for IPIs */ + if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) { + pr_warn("MIPS CPS SMP unable to proceed without a GIC\n"); + return -ENODEV; + } + + register_smp_ops(&cps_smp_ops); + return 0; +} diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c new file mode 100644 index 000000000000..3b21a96d1ccb --- /dev/null +++ b/arch/mips/kernel/smp-gic.c @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2013 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * Based on smp-cmp.c: + * Copyright (C) 2007 MIPS Technologies, Inc. + * Author: Chris Dearman (chris@mips.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/printk.h> + +#include <asm/gic.h> +#include <asm/mips-cpc.h> +#include <asm/smp-ops.h> + +void gic_send_ipi_single(int cpu, unsigned int action) +{ + unsigned long flags; + unsigned int intr; + unsigned int core = cpu_data[cpu].core; + + pr_debug("CPU%d: %s cpu %d action %u status %08x\n", + smp_processor_id(), __func__, cpu, action, read_c0_status()); + + local_irq_save(flags); + + switch (action) { + case SMP_CALL_FUNCTION: + intr = plat_ipi_call_int_xlate(cpu); + break; + + case SMP_RESCHEDULE_YOURSELF: + intr = plat_ipi_resched_int_xlate(cpu); + break; + + default: + BUG(); + } + + gic_send_ipi(intr); + + if (mips_cpc_present() && (core != current_cpu_data.core)) { + while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { + mips_cpc_lock_other(core); + write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); + mips_cpc_unlock_other(); + } + } + + local_irq_restore(flags); +} + +void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu(i, mask) + gic_send_ipi_single(i, action); +} diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 57a3f7a2b370..3babf6e4f894 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -71,6 +71,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, /* Record this as available CPU */ set_cpu_possible(tc, true); + set_cpu_present(tc, true); __cpu_number_map[tc] = ++ncpu; __cpu_logical_map[ncpu] = tc; } @@ -118,6 +119,12 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action) unsigned long flags; int vpflags; +#ifdef CONFIG_IRQ_GIC + if (gic_present) { + gic_send_ipi_single(cpu, action); + return; + } +#endif local_irq_save(flags); vpflags = dvpe(); /* can't access the other CPU's 
registers whilst MVPE enabled */ @@ -176,10 +183,6 @@ static void vsmp_smp_finish(void) local_irq_enable(); } -static void vsmp_cpus_done(void) -{ -} - /* * Setup the PC, SP, and GP of a secondary processor and start it * running! @@ -280,8 +283,29 @@ struct plat_smp_ops vsmp_smp_ops = { .send_ipi_mask = vsmp_send_ipi_mask, .init_secondary = vsmp_init_secondary, .smp_finish = vsmp_smp_finish, - .cpus_done = vsmp_cpus_done, .boot_secondary = vsmp_boot_secondary, .smp_setup = vsmp_smp_setup, .prepare_cpus = vsmp_prepare_cpus, }; + +static int proc_cpuinfo_chain_call(struct notifier_block *nfb, + unsigned long action_unused, void *data) +{ + struct proc_cpuinfo_notifier_args *pcn = data; + struct seq_file *m = pcn->m; + unsigned long n = pcn->n; + + if (!cpu_has_mipsmt) + return NOTIFY_OK; + + seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); + + return NOTIFY_OK; +} + +static int __init proc_cpuinfo_notifier_init(void) +{ + return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0); +} + +subsys_initcall(proc_cpuinfo_notifier_init); diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c index 7fde3e4d978f..17878d71ef2b 100644 --- a/arch/mips/kernel/smp-up.c +++ b/arch/mips/kernel/smp-up.c @@ -36,11 +36,6 @@ static void up_smp_finish(void) { } -/* Hook for after all CPUs are online */ -static void up_cpus_done(void) -{ -} - /* * Firmware CPU startup hook */ @@ -73,7 +68,6 @@ struct plat_smp_ops up_smp_ops = { .send_ipi_mask = up_send_ipi_mask, .init_secondary = up_init_secondary, .smp_finish = up_smp_finish, - .cpus_done = up_cpus_done, .boot_secondary = up_boot_secondary, .smp_setup = up_smp_setup, .prepare_cpus = up_prepare_cpus, diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 0a022ee33b2a..9bad52ede903 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -43,10 +43,6 @@ #include <asm/time.h> #include <asm/setup.h> -#ifdef CONFIG_MIPS_MT_SMTC -#include <asm/mipsmtregs.h> -#endif /* CONFIG_MIPS_MT_SMTC */ - volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ @@ -66,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map); /* representing cpus for which sibling maps can be computed */ static cpumask_t cpu_sibling_setup_map; +cpumask_t cpu_coherent_mask; + static inline void set_cpu_sibling_map(int cpu) { int i; @@ -102,12 +100,6 @@ asmlinkage void start_secondary(void) { unsigned int cpu; -#ifdef CONFIG_MIPS_MT_SMTC - /* Only do cpu_probe for first TC of CPU */ - if ((read_c0_tcbind() & TCBIND_CURTC) != 0) - __cpu_name[smp_processor_id()] = __cpu_name[0]; - else -#endif /* CONFIG_MIPS_MT_SMTC */ cpu_probe(); cpu_report(); per_cpu_trap_init(false); @@ -124,6 +116,7 @@ asmlinkage void start_secondary(void) cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; + cpu_set(cpu, cpu_coherent_mask); notify_cpu_starting(cpu); set_cpu_online(cpu, true); @@ -173,7 +166,6 @@ void smp_send_stop(void) void __init smp_cpus_done(unsigned int max_cpus) { - mp_ops->cpus_done(); } /* called from main before smp_init() */ @@ -186,6 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif + cpumask_copy(&cpu_coherent_mask, cpu_possible_mask); } /* preload SMP state for boot cpu */ @@ -238,13 +231,10 @@ static void flush_tlb_mm_ipi(void *mm) * o collapses to normal function call on UP kernels * o collapses to normal function call on systems with a single shared * primary cache. 
- * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core. */ static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) { -#ifndef CONFIG_MIPS_MT_SMTC smp_call_function(func, info, 1); -#endif } static inline void smp_on_each_tlb(void (*func) (void *info), void *info) @@ -404,3 +394,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *)) } EXPORT_SYMBOL(dump_send_ipi); #endif + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + +static DEFINE_PER_CPU(atomic_t, tick_broadcast_count); +static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd); + +void tick_broadcast(const struct cpumask *mask) +{ + atomic_t *count; + struct call_single_data *csd; + int cpu; + + for_each_cpu(cpu, mask) { + count = &per_cpu(tick_broadcast_count, cpu); + csd = &per_cpu(tick_broadcast_csd, cpu); + + if (atomic_inc_return(count) == 1) + smp_call_function_single_async(cpu, csd); + } +} + +static void tick_broadcast_callee(void *info) +{ + int cpu = smp_processor_id(); + tick_receive_broadcast(); + atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); +} + +static int __init tick_broadcast_init(void) +{ + struct call_single_data *csd; + int cpu; + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + csd = &per_cpu(tick_broadcast_csd, cpu); + csd->func = tick_broadcast_callee; + } + + return 0; +} +early_initcall(tick_broadcast_init); + +#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */ diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S deleted file mode 100644 index 2866863a39df..000000000000 --- a/arch/mips/kernel/smtc-asm.S +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Assembly Language Functions for MIPS MT SMTC support - */ - -/* - * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */ - -#include <asm/regdef.h> -#include <asm/asmmacro.h> -#include <asm/stackframe.h> -#include <asm/irqflags.h> - -/* - * "Software Interrupt" linkage. - * - * This is invoked when an "Interrupt" is sent from one TC to another, - * where the TC to be interrupted is halted, has it's Restart address - * and Status values saved by the "remote control" thread, then modified - * to cause execution to begin here, in kenel mode. This code then - * disguises the TC state as that of an exception and transfers - * control to the general exception or vectored interrupt handler. - */ - .set noreorder - -/* -The __smtc_ipi_vector would use k0 and k1 as temporaries and -1) Set EXL (this is per-VPE, so this can't be done by proxy!) -2) Restore the K/CU and IXMT bits to the pre "exception" state - (EXL means no interrupts and access to the kernel map). -3) Set EPC to be the saved value of TCRestart. -4) Jump to the exception handler entry point passed by the sender. - -CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? -*/ - -/* - * Reviled and slandered vision: Set EXL and restore K/CU/IXMT - * state of pre-halt thread, then save everything and call - * thought some function pointer to imaginary_exception, which - * will parse a register value or memory message queue to - * deliver things like interprocessor interrupts. On return - * from that function, jump to the global ret_from_irq code - * to invoke the scheduler and return as appropriate. 
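/*
 * Aside, not part of the removed comment: the "message" travels through
 * pt_regs pad slots -- post_direct_ipi() further down stores the handler
 * in pad0[5] and its argument in pad0[4], and __smtc_ipi_vector reloads
 * them through the PT_PADSLOT5/PT_PADSLOT4 offsets defined below before
 * jumping to the handler.
 */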
- */ - -#define PT_PADSLOT4 (PT_R0-8) -#define PT_PADSLOT5 (PT_R0-4) - - .text - .align 5 -FEXPORT(__smtc_ipi_vector) -#ifdef CONFIG_CPU_MICROMIPS - nop -#endif - .set noat - /* Disable thread scheduling to make Status update atomic */ - DMT 27 # dmt k1 - _ehb - /* Set EXL */ - mfc0 k0,CP0_STATUS - ori k0,k0,ST0_EXL - mtc0 k0,CP0_STATUS - _ehb - /* Thread scheduling now inhibited by EXL. Restore TE state. */ - andi k1,k1,VPECONTROL_TE - beqz k1,1f - emt -1: - /* - * The IPI sender has put some information on the anticipated - * kernel stack frame. If we were in user mode, this will be - * built above the saved kernel SP. If we were already in the - * kernel, it will be built above the current CPU SP. - * - * Were we in kernel mode, as indicated by CU0? - */ - sll k1,k0,3 - .set noreorder - bltz k1,2f - move k1,sp - .set reorder - /* - * If previously in user mode, set CU0 and use kernel stack. - */ - li k1,ST0_CU0 - or k1,k1,k0 - mtc0 k1,CP0_STATUS - _ehb - get_saved_sp - /* Interrupting TC will have pre-set values in slots in the new frame */ -2: subu k1,k1,PT_SIZE - /* Load TCStatus Value */ - lw k0,PT_TCSTATUS(k1) - /* Write it to TCStatus to restore CU/KSU/IXMT state */ - mtc0 k0,$2,1 - _ehb - lw k0,PT_EPC(k1) - mtc0 k0,CP0_EPC - /* Save all will redundantly recompute the SP, but use it for now */ - SAVE_ALL - CLI - TRACE_IRQS_OFF - /* Function to be invoked passed stack pad slot 5 */ - lw t0,PT_PADSLOT5(sp) - /* Argument from sender passed in stack pad slot 4 */ - lw a0,PT_PADSLOT4(sp) - LONG_L s0, TI_REGS($28) - LONG_S sp, TI_REGS($28) - PTR_LA ra, ret_from_irq - jr t0 - -/* - * Called from idle loop to provoke processing of queued IPIs - * First IPI message in queue passed as argument. - */ - -LEAF(self_ipi) - /* Before anything else, block interrupts */ - mfc0 t0,CP0_TCSTATUS - ori t1,t0,TCSTATUS_IXMT - mtc0 t1,CP0_TCSTATUS - _ehb - /* We know we're in kernel mode, so prepare stack frame */ - subu t1,sp,PT_SIZE - sw ra,PT_EPC(t1) - sw a0,PT_PADSLOT4(t1) - la t2,ipi_decode - sw t2,PT_PADSLOT5(t1) - /* Save pre-disable value of TCStatus */ - sw t0,PT_TCSTATUS(t1) - j __smtc_ipi_vector - nop -END(self_ipi) diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c deleted file mode 100644 index c10aa84c9fa9..000000000000 --- a/arch/mips/kernel/smtc-proc.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - * /proc hooks for SMTC kernel - * Copyright (C) 2005 Mips Technologies, Inc - */ - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/cpumask.h> -#include <linux/interrupt.h> - -#include <asm/cpu.h> -#include <asm/processor.h> -#include <linux/atomic.h> -#include <asm/hardirq.h> -#include <asm/mmu_context.h> -#include <asm/mipsregs.h> -#include <asm/cacheflush.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> - -#include <asm/smtc_proc.h> - -/* - * /proc diagnostic and statistics hooks - */ - -/* - * Statistics gathered - */ -unsigned long selfipis[NR_CPUS]; - -struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; - -atomic_t smtc_fpu_recoveries; - -static int smtc_proc_show(struct seq_file *m, void *v) -{ - int i; - extern unsigned long ebase; - - seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status); - seq_printf(m, "Config7: 0x%08x\n", read_c0_config7()); - seq_printf(m, "EBASE: 0x%08lx\n", ebase); - seq_printf(m, "Counter Interrupts taken per CPU (TC)\n"); - for (i=0; i < NR_CPUS; i++) - seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints); - seq_printf(m, "Self-IPIs by CPU:\n"); - for(i = 0; i < NR_CPUS; i++) - seq_printf(m, "%d: %ld\n", i, 
smtc_cpu_stats[i].selfipis); - seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", - atomic_read(&smtc_fpu_recoveries)); - return 0; -} - -static int smtc_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, smtc_proc_show, NULL); -} - -static const struct file_operations smtc_proc_fops = { - .open = smtc_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void init_smtc_stats(void) -{ - int i; - - for (i=0; i<NR_CPUS; i++) { - smtc_cpu_stats[i].timerints = 0; - smtc_cpu_stats[i].selfipis = 0; - } - - atomic_set(&smtc_fpu_recoveries, 0); - - proc_create("smtc", 0444, NULL, &smtc_proc_fops); -} diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c deleted file mode 100644 index dfc1b911be04..000000000000 --- a/arch/mips/kernel/smtc.c +++ /dev/null @@ -1,1528 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) 2004 Mips Technologies, Inc - * Copyright (C) 2008 Kevin D. Kissell - */ - -#include <linux/clockchips.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/cpumask.h> -#include <linux/interrupt.h> -#include <linux/kernel_stat.h> -#include <linux/module.h> -#include <linux/ftrace.h> -#include <linux/slab.h> - -#include <asm/cpu.h> -#include <asm/processor.h> -#include <linux/atomic.h> -#include <asm/hardirq.h> -#include <asm/hazards.h> -#include <asm/irq.h> -#include <asm/idle.h> -#include <asm/mmu_context.h> -#include <asm/mipsregs.h> -#include <asm/cacheflush.h> -#include <asm/time.h> -#include <asm/addrspace.h> -#include <asm/smtc.h> -#include <asm/smtc_proc.h> -#include <asm/setup.h> - -/* - * SMTC Kernel needs to manipulate low-level CPU interrupt mask - * in do_IRQ. These are passed in setup_irq_smtc() and stored - * in this table. - */ -unsigned long irq_hwmask[NR_IRQS]; - -#define LOCK_MT_PRA() \ - local_irq_save(flags); \ - mtflags = dmt() - -#define UNLOCK_MT_PRA() \ - emt(mtflags); \ - local_irq_restore(flags) - -#define LOCK_CORE_PRA() \ - local_irq_save(flags); \ - mtflags = dvpe() - -#define UNLOCK_CORE_PRA() \ - evpe(mtflags); \ - local_irq_restore(flags) - -/* - * Data structures purely associated with SMTC parallelism - */ - - -/* - * Table for tracking ASIDs whose lifetime is prolonged. 
- */ - -asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; - -/* - * Number of InterProcessor Interrupt (IPI) message buffers to allocate - */ - -#define IPIBUF_PER_CPU 4 - -struct smtc_ipi_q IPIQ[NR_CPUS]; -static struct smtc_ipi_q freeIPIq; - - -/* - * Number of FPU contexts for each VPE - */ - -static int smtc_nconf1[MAX_SMTC_VPES]; - - -/* Forward declarations */ - -void ipi_decode(struct smtc_ipi *); -static void post_direct_ipi(int cpu, struct smtc_ipi *pipi); -static void setup_cross_vpe_interrupts(unsigned int nvpe); -void init_smtc_stats(void); - -/* Global SMTC Status */ - -unsigned int smtc_status; - -/* Boot command line configuration overrides */ - -static int vpe0limit; -static int ipibuffers; -static int nostlb; -static int asidmask; -unsigned long smtc_asid_mask = 0xff; - -static int __init vpe0tcs(char *str) -{ - get_option(&str, &vpe0limit); - - return 1; -} - -static int __init ipibufs(char *str) -{ - get_option(&str, &ipibuffers); - return 1; -} - -static int __init stlb_disable(char *s) -{ - nostlb = 1; - return 1; -} - -static int __init asidmask_set(char *str) -{ - get_option(&str, &asidmask); - switch (asidmask) { - case 0x1: - case 0x3: - case 0x7: - case 0xf: - case 0x1f: - case 0x3f: - case 0x7f: - case 0xff: - smtc_asid_mask = (unsigned long)asidmask; - break; - default: - printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask); - } - return 1; -} - -__setup("vpe0tcs=", vpe0tcs); -__setup("ipibufs=", ipibufs); -__setup("nostlb", stlb_disable); -__setup("asidmask=", asidmask_set); - -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG - -static int hang_trig; - -static int __init hangtrig_enable(char *s) -{ - hang_trig = 1; - return 1; -} - - -__setup("hangtrig", hangtrig_enable); - -#define DEFAULT_BLOCKED_IPI_LIMIT 32 - -static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT; - -static int __init tintq(char *str) -{ - get_option(&str, &timerq_limit); - return 1; -} - -__setup("tintq=", tintq); - -static int imstuckcount[MAX_SMTC_VPES][8]; -/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */ -static int vpemask[MAX_SMTC_VPES][8] = { - {0, 0, 1, 0, 0, 0, 0, 1}, - {0, 0, 0, 0, 0, 0, 0, 1} -}; -int tcnoprog[NR_CPUS]; -static atomic_t idle_hook_initialized = ATOMIC_INIT(0); -static int clock_hang_reported[NR_CPUS]; - -#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - -/* - * Configure shared TLB - VPC configuration bit must be set by caller - */ - -static void smtc_configure_tlb(void) -{ - int i, tlbsiz, vpes; - unsigned long mvpconf0; - unsigned long config1val; - - /* Set up ASID preservation table */ - for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) { - for(i = 0; i < MAX_SMTC_ASIDS; i++) { - smtc_live_asid[vpes][i] = 0; - } - } - mvpconf0 = read_c0_mvpconf0(); - - if ((vpes = ((mvpconf0 & MVPCONF0_PVPE) - >> MVPCONF0_PVPE_SHIFT) + 1) > 1) { - /* If we have multiple VPEs, try to share the TLB */ - if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) { - /* - * If TLB sizing is programmable, shared TLB - * size is the total available complement. - * Otherwise, we have to take the sum of all - * static VPE TLB entries. - */ - if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE) - >> MVPCONF0_PTLBE_SHIFT)) == 0) { - /* - * If there's more than one VPE, there had better - * be more than one TC, because we need one to bind - * to each VPE in turn to be able to read - * its configuration state! 
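/*
 * Aside, not part of the removed comment: when TLB sizing is not
 * programmable (the MVPConf0 PTLBE field reads zero), the loop below
 * rebinds spare TC 1 to each VPE in turn and sums the per-VPE Config1
 * MMU-size fields, ((config1val >> 25) & 0x3f) + 1 entries apiece, to
 * arrive at the total.
 */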
- */ - settc(1); - /* Stop the TC from doing anything foolish */ - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - /* No need to un-Halt - that happens later anyway */ - for (i=0; i < vpes; i++) { - write_tc_c0_tcbind(i); - /* - * To be 100% sure we're really getting the right - * information, we exit the configuration state - * and do an IHB after each rebinding. - */ - write_c0_mvpcontrol( - read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); - mips_ihb(); - /* - * Only count if the MMU Type indicated is TLB - */ - if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) { - config1val = read_vpe_c0_config1(); - tlbsiz += ((config1val >> 25) & 0x3f) + 1; - } - - /* Put core back in configuration state */ - write_c0_mvpcontrol( - read_c0_mvpcontrol() | MVPCONTROL_VPC ); - mips_ihb(); - } - } - write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB); - ehb(); - - /* - * Setup kernel data structures to use software total, - * rather than read the per-VPE Config1 value. The values - * for "CPU 0" gets copied to all the other CPUs as part - * of their initialization in smtc_cpu_setup(). - */ - - /* MIPS32 limits TLB indices to 64 */ - if (tlbsiz > 64) - tlbsiz = 64; - cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz; - smtc_status |= SMTC_TLB_SHARED; - local_flush_tlb_all(); - - printk("TLB of %d entry pairs shared by %d VPEs\n", - tlbsiz, vpes); - } else { - printk("WARNING: TLB Not Sharable on SMTC Boot!\n"); - } - } -} - - -/* - * Incrementally build the CPU map out of constituent MIPS MT cores, - * using the specified available VPEs and TCs. Plaform code needs - * to ensure that each MIPS MT core invokes this routine on reset, - * one at a time(!). - * - * This version of the build_cpu_map and prepare_cpus routines assumes - * that *all* TCs of a MIPS MT core will be used for Linux, and that - * they will be spread across *all* available VPEs (to minimise the - * loss of efficiency due to exception service serialization). - * An improved version would pick up configuration information and - * possibly leave some TCs/VPEs as "slave" processors. - * - * Use c0_MVPConf0 to find out how many TCs are available, setting up - * cpu_possible_mask and the logical/physical mappings. - */ - -int __init smtc_build_cpu_map(int start_cpu_slot) -{ - int i, ntcs; - - /* - * The CPU map isn't actually used for anything at this point, - * so it's not clear what else we should do apart from set - * everything up so that "logical" = "physical". - */ - ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { - set_cpu_possible(i, true); - __cpu_number_map[i] = i; - __cpu_logical_map[i] = i; - } -#ifdef CONFIG_MIPS_MT_FPAFF - /* Initialize map of CPUs with FPUs */ - cpus_clear(mt_fpu_cpumask); -#endif - - /* One of those TC's is the one booting, and not a secondary... */ - printk("%i available secondary CPU TC(s)\n", i - 1); - - return i; -} - -/* - * Common setup before any secondaries are started - * Make sure all CPUs are in a sensible state before we boot any of the - * secondaries. - * - * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly - * as possible across the available VPEs. - */ - -static void smtc_tc_setup(int vpe, int tc, int cpu) -{ - static int cp1contexts[MAX_SMTC_VPES]; - - /* - * Make a local copy of the available FPU contexts in order - * to keep track of TCs that can have one. - */ - if (tc == 1) - { - /* - * FIXME: Multi-core SMTC hasn't been tested and the - * maximum number of VPEs may change. 
- */ - cp1contexts[0] = smtc_nconf1[0] - 1; - cp1contexts[1] = smtc_nconf1[1]; - } - - settc(tc); - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - write_tc_c0_tcstatus((read_tc_c0_tcstatus() - & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) - | TCSTATUS_A); - /* - * TCContext gets an offset from the base of the IPIQ array - * to be used in low-level code to detect the presence of - * an active IPI queue. - */ - write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); - - /* Bind TC to VPE. */ - write_tc_c0_tcbind(vpe); - - /* In general, all TCs should have the same cpu_data indications. */ - memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips)); - - /* Check to see if there is a FPU context available for this TC. */ - if (!cp1contexts[vpe]) - cpu_data[cpu].options &= ~MIPS_CPU_FPU; - else - cp1contexts[vpe]--; - - /* Store the TC and VPE into the cpu_data structure. */ - cpu_data[cpu].vpe_id = vpe; - cpu_data[cpu].tc_id = tc; - - /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */ - cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; -} - -/* - * Tweak to get Count registers synced as closely as possible. The - * value seems good for 34K-class cores. - */ - -#define CP0_SKEW 8 - -void smtc_prepare_cpus(int cpus) -{ - int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; - unsigned long flags; - unsigned long val; - int nipi; - struct smtc_ipi *pipi; - - /* disable interrupts so we can disable MT */ - local_irq_save(flags); - /* disable MT so we can configure */ - dvpe(); - dmt(); - - spin_lock_init(&freeIPIq.lock); - - /* - * We probably don't have as many VPEs as we do SMP "CPUs", - * but it's possible - and in any case we'll never use more! - */ - for (i=0; i<NR_CPUS; i++) { - IPIQ[i].head = IPIQ[i].tail = NULL; - spin_lock_init(&IPIQ[i].lock); - IPIQ[i].depth = 0; - IPIQ[i].resched_flag = 0; /* No reschedules queued initially */ - } - - /* cpu_data index starts at zero */ - cpu = 0; - cpu_data[cpu].vpe_id = 0; - cpu_data[cpu].tc_id = 0; - cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; - cpu++; - - /* Report on boot-time options */ - mips_mt_set_cpuoptions(); - if (vpelimit > 0) - printk("Limit of %d VPEs set\n", vpelimit); - if (tclimit > 0) - printk("Limit of %d TCs set\n", tclimit); - if (nostlb) { - printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n"); - } - if (asidmask) - printk("ASID mask value override to 0x%x\n", asidmask); - - /* Temporary */ -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG - if (hang_trig) - printk("Logic Analyser Trigger on suspected TC hang\n"); -#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - - /* Put MVPE's into 'configuration state' */ - write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC ); - - val = read_c0_mvpconf0(); - nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - if (vpelimit > 0 && nvpe > vpelimit) - nvpe = vpelimit; - ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - if (ntc > NR_CPUS) - ntc = NR_CPUS; - if (tclimit > 0 && ntc > tclimit) - ntc = tclimit; - slop = ntc % nvpe; - for (i = 0; i < nvpe; i++) { - tcpervpe[i] = ntc / nvpe; - if (slop) { - if((slop - i) > 0) tcpervpe[i]++; - } - } - /* Handle command line override for VPE0 */ - if (vpe0limit > ntc) vpe0limit = ntc; - if (vpe0limit > 0) { - int slopslop; - if (vpe0limit < tcpervpe[0]) { - /* Reducing TC count - distribute to others */ - slop = tcpervpe[0] - vpe0limit; - slopslop = slop % (nvpe - 1); - tcpervpe[0] = vpe0limit; - for (i = 1; i < nvpe; i++) { - tcpervpe[i] += slop / (nvpe - 1); - if(slopslop && 
((slopslop - (i - 1) > 0))) - tcpervpe[i]++; - } - } else if (vpe0limit > tcpervpe[0]) { - /* Increasing TC count - steal from others */ - slop = vpe0limit - tcpervpe[0]; - slopslop = slop % (nvpe - 1); - tcpervpe[0] = vpe0limit; - for (i = 1; i < nvpe; i++) { - tcpervpe[i] -= slop / (nvpe - 1); - if(slopslop && ((slopslop - (i - 1) > 0))) - tcpervpe[i]--; - } - } - } - - /* Set up shared TLB */ - smtc_configure_tlb(); - - for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) { - /* Get number of CP1 contexts for each VPE. */ - if (tc == 0) - { - /* - * Do not call settc() for TC0 or the FPU context - * value will be incorrect. Besides, we know that - * we are TC0 anyway. - */ - smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() & - VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); - if (nvpe == 2) - { - settc(1); - smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() & - VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT); - settc(0); - } - } - if (tcpervpe[vpe] == 0) - continue; - if (vpe != 0) - printk(", "); - printk("VPE %d: TC", vpe); - for (i = 0; i < tcpervpe[vpe]; i++) { - /* - * TC 0 is bound to VPE 0 at reset, - * and is presumably executing this - * code. Leave it alone! - */ - if (tc != 0) { - smtc_tc_setup(vpe, tc, cpu); - if (vpe != 0) { - /* - * Set MVP bit (possibly again). Do it - * here to catch CPUs that have no TCs - * bound to the VPE at reset. In that - * case, a TC must be bound to the VPE - * before we can set VPEControl[MVP] - */ - write_vpe_c0_vpeconf0( - read_vpe_c0_vpeconf0() | - VPECONF0_MVP); - } - cpu++; - } - printk(" %d", tc); - tc++; - } - if (vpe != 0) { - /* - * Allow this VPE to control others. - */ - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | - VPECONF0_MVP); - - /* - * Clear any stale software interrupts from VPE's Cause - */ - write_vpe_c0_cause(0); - - /* - * Clear ERL/EXL of VPEs other than 0 - * and set restricted interrupt enable/mask. - */ - write_vpe_c0_status((read_vpe_c0_status() - & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM)) - | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7 - | ST0_IE)); - /* - * set config to be the same as vpe0, - * particularly kseg0 coherency alg - */ - write_vpe_c0_config(read_c0_config()); - /* Clear any pending timer interrupt */ - write_vpe_c0_compare(0); - /* Propagate Config7 */ - write_vpe_c0_config7(read_c0_config7()); - write_vpe_c0_count(read_c0_count() + CP0_SKEW); - ehb(); - } - /* enable multi-threading within VPE */ - write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); - /* enable the VPE */ - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); - } - - /* - * Pull any physically present but unused TCs out of circulation. - */ - while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { - set_cpu_possible(tc, false); - set_cpu_present(tc, false); - tc++; - } - - /* release config state */ - write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC ); - - printk("\n"); - - /* Set up coprocessor affinity CPU mask(s) */ - -#ifdef CONFIG_MIPS_MT_FPAFF - for (tc = 0; tc < ntc; tc++) { - if (cpu_data[tc].options & MIPS_CPU_FPU) - cpu_set(tc, mt_fpu_cpumask); - } -#endif - - /* set up ipi interrupts... */ - - /* If we have multiple VPEs running, set up the cross-VPE interrupt */ - - setup_cross_vpe_interrupts(nvpe); - - /* Set up queue of free IPI "messages". 
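/*
 * Aside, not part of the removed comment: the pool defaults to
 * IPIBUF_PER_CPU (4) descriptors per possible CPU, overridable with the
 * "ipibufs=" boot argument parsed earlier; smtc_send_ipi() panics with
 * "IPI Msg. Buffers Depleted" if the pool ever runs dry.
 */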
*/ - nipi = NR_CPUS * IPIBUF_PER_CPU; - if (ipibuffers > 0) - nipi = ipibuffers; - - pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL); - if (pipi == NULL) - panic("kmalloc of IPI message buffers failed"); - else - printk("IPI buffer pool of %d buffers\n", nipi); - for (i = 0; i < nipi; i++) { - smtc_ipi_nq(&freeIPIq, pipi); - pipi++; - } - - /* Arm multithreading and enable other VPEs - but all TCs are Halted */ - emt(EMT_ENABLE); - evpe(EVPE_ENABLE); - local_irq_restore(flags); - /* Initialize SMTC /proc statistics/diagnostics */ - init_smtc_stats(); -} - - -/* - * Setup the PC, SP, and GP of a secondary processor and start it - * running! - * smp_bootstrap is the place to resume from - * __KSTK_TOS(idle) is apparently the stack pointer - * (unsigned long)idle->thread_info the gp - * - */ -void smtc_boot_secondary(int cpu, struct task_struct *idle) -{ - extern u32 kernelsp[NR_CPUS]; - unsigned long flags; - int mtflags; - - LOCK_MT_PRA(); - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - dvpe(); - } - settc(cpu_data[cpu].tc_id); - - /* pc */ - write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); - - /* stack pointer */ - kernelsp[cpu] = __KSTK_TOS(idle); - write_tc_gpr_sp(__KSTK_TOS(idle)); - - /* global pointer */ - write_tc_gpr_gp((unsigned long)task_thread_info(idle)); - - smtc_status |= SMTC_MTC_ACTIVE; - write_tc_c0_tchalt(0); - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - evpe(EVPE_ENABLE); - } - UNLOCK_MT_PRA(); -} - -void smtc_init_secondary(void) -{ -} - -void smtc_smp_finish(void) -{ - int cpu = smp_processor_id(); - - /* - * Lowest-numbered CPU per VPE starts a clock tick. - * Like per_cpu_trap_init() hack, this assumes that - * SMTC init code assigns TCs consdecutively and - * in ascending order across available VPEs. - */ - if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) - write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); - - local_irq_enable(); - - printk("TC %d going on-line as CPU %d\n", - cpu_data[smp_processor_id()].tc_id, smp_processor_id()); -} - -void smtc_cpus_done(void) -{ -} - -/* - * Support for SMTC-optimized driver IRQ registration - */ - -/* - * SMTC Kernel needs to manipulate low-level CPU interrupt mask - * in do_IRQ. These are passed in setup_irq_smtc() and stored - * in this table. - */ - -int setup_irq_smtc(unsigned int irq, struct irqaction * new, - unsigned long hwmask) -{ -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG - unsigned int vpe = current_cpu_data.vpe_id; - - vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1; -#endif - irq_hwmask[irq] = hwmask; - - return setup_irq(irq, new); -} - -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF -/* - * Support for IRQ affinity to TCs - */ - -void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) -{ - /* - * If a "fast path" cache of quickly decodable affinity state - * is maintained, this is where it gets done, on a call up - * from the platform affinity code. - */ -} - -void smtc_forward_irq(struct irq_data *d) -{ - unsigned int irq = d->irq; - int target; - - /* - * OK wise guy, now figure out how to get the IRQ - * to be serviced on an authorized "CPU". - * - * Ideally, to handle the situation where an IRQ has multiple - * eligible CPUS, we would maintain state per IRQ that would - * allow a fair distribution of service requests. Since the - * expected use model is any-or-only-one, for simplicity - * and efficiency, we just pick the easiest one to find. 
- */ - - target = cpumask_first(d->affinity); - - /* - * We depend on the platform code to have correctly processed - * IRQ affinity change requests to ensure that the IRQ affinity - * mask has been purged of bits corresponding to nonexistent and - * offline "CPUs", and to TCs bound to VPEs other than the VPE - * connected to the physical interrupt input for the interrupt - * in question. Otherwise we have a nasty problem with interrupt - * mask management. This is best handled in non-performance-critical - * platform IRQ affinity setting code, to minimize interrupt-time - * checks. - */ - - /* If no one is eligible, service locally */ - if (target >= NR_CPUS) - do_IRQ_no_affinity(irq); - else - smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); -} - -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ - -/* - * IPI model for SMTC is tricky, because interrupts aren't TC-specific. - * Within a VPE one TC can interrupt another by different approaches. - * The easiest to get right would probably be to make all TCs except - * the target IXMT and set a software interrupt, but an IXMT-based - * scheme requires that a handler must run before a new IPI could - * be sent, which would break the "broadcast" loops in MIPS MT. - * A more gonzo approach within a VPE is to halt the TC, extract - * its Restart, Status, and a couple of GPRs, and program the Restart - * address to emulate an interrupt. - * - * Within a VPE, one can be confident that the target TC isn't in - * a critical EXL state when halted, since the write to the Halt - * register could not have issued on the writing thread if the - * halting thread had EXL set. So k0 and k1 of the target TC - * can be used by the injection code. Across VPEs, one can't - * be certain that the target TC isn't in a critical exception - * state. So we try a two-step process of sending a software - * interrupt to the target VPE, which either handles the event - * itself (if it was the target) or injects the event within - * the VPE. - */ - -static void smtc_ipi_qdump(void) -{ - int i; - struct smtc_ipi *temp; - - for (i = 0; i < NR_CPUS ;i++) { - pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n", - i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail, - IPIQ[i].depth); - temp = IPIQ[i].head; - - while (temp != IPIQ[i].tail) { - pr_debug("%d %d %d: ", temp->type, temp->dest, - (int)temp->arg); -#ifdef SMTC_IPI_DEBUG - pr_debug("%u %lu\n", temp->sender, temp->stamp); -#else - pr_debug("\n"); -#endif - temp = temp->flink; - } - } -} - -/* - * The standard atomic.h primitives don't quite do what we want - * here: We need an atomic add-and-return-previous-value (which - * could be done with atomic_add_return and a decrement) and an - * atomic set/zero-and-return-previous-value (which can't really - * be done with the atomic.h primitives). And since this is - * MIPS MT, we can assume that we have LL/SC. 
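/*
 * Aside, not part of the removed comment: the LL/SC sequence below is a
 * fetch-and-increment -- ll loads the old counter value, addu computes
 * old + 1, sc attempts the store and leaves 0 in its source register if
 * another CPU intervened, and beqz retries in that case; the old value
 * is what gets returned.
 */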
- */ -static inline int atomic_postincrement(atomic_t *v) -{ - unsigned long result; - - unsigned long temp; - - __asm__ __volatile__( - "1: ll %0, %2 \n" - " addu %1, %0, 1 \n" - " sc %1, %2 \n" - " beqz %1, 1b \n" - __WEAK_LLSC_MB - : "=&r" (result), "=&r" (temp), "=m" (v->counter) - : "m" (v->counter) - : "memory"); - - return result; -} - -void smtc_send_ipi(int cpu, int type, unsigned int action) -{ - int tcstatus; - struct smtc_ipi *pipi; - unsigned long flags; - int mtflags; - unsigned long tcrestart; - int set_resched_flag = (type == LINUX_SMP_IPI && - action == SMP_RESCHEDULE_YOURSELF); - - if (cpu == smp_processor_id()) { - printk("Cannot Send IPI to self!\n"); - return; - } - if (set_resched_flag && IPIQ[cpu].resched_flag != 0) - return; /* There is a reschedule queued already */ - - /* Set up a descriptor, to be delivered either promptly or queued */ - pipi = smtc_ipi_dq(&freeIPIq); - if (pipi == NULL) { - bust_spinlocks(1); - mips_mt_regdump(dvpe()); - panic("IPI Msg. Buffers Depleted"); - } - pipi->type = type; - pipi->arg = (void *)action; - pipi->dest = cpu; - if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { - /* If not on same VPE, enqueue and send cross-VPE interrupt */ - IPIQ[cpu].resched_flag |= set_resched_flag; - smtc_ipi_nq(&IPIQ[cpu], pipi); - LOCK_CORE_PRA(); - settc(cpu_data[cpu].tc_id); - write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1); - UNLOCK_CORE_PRA(); - } else { - /* - * Not sufficient to do a LOCK_MT_PRA (dmt) here, - * since ASID shootdown on the other VPE may - * collide with this operation. - */ - LOCK_CORE_PRA(); - settc(cpu_data[cpu].tc_id); - /* Halt the targeted TC */ - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - - /* - * Inspect TCStatus - if IXMT is set, we have to queue - * a message. Otherwise, we set up the "interrupt" - * of the other TC - */ - tcstatus = read_tc_c0_tcstatus(); - - if ((tcstatus & TCSTATUS_IXMT) != 0) { - /* - * If we're in the the irq-off version of the wait - * loop, we need to force exit from the wait and - * do a direct post of the IPI. - */ - if (cpu_wait == r4k_wait_irqoff) { - tcrestart = read_tc_c0_tcrestart(); - if (address_is_in_r4k_wait_irqoff(tcrestart)) { - write_tc_c0_tcrestart(__pastwait); - tcstatus &= ~TCSTATUS_IXMT; - write_tc_c0_tcstatus(tcstatus); - goto postdirect; - } - } - /* - * Otherwise we queue the message for the target TC - * to pick up when he does a local_irq_restore() - */ - write_tc_c0_tchalt(0); - UNLOCK_CORE_PRA(); - IPIQ[cpu].resched_flag |= set_resched_flag; - smtc_ipi_nq(&IPIQ[cpu], pipi); - } else { -postdirect: - post_direct_ipi(cpu, pipi); - write_tc_c0_tchalt(0); - UNLOCK_CORE_PRA(); - } - } -} - -/* - * Send IPI message to Halted TC, TargTC/TargVPE already having been set - */ -static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) -{ - struct pt_regs *kstack; - unsigned long tcstatus; - unsigned long tcrestart; - extern u32 kernelsp[NR_CPUS]; - extern void __smtc_ipi_vector(void); -//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu); - - /* Extract Status, EPC from halted TC */ - tcstatus = read_tc_c0_tcstatus(); - tcrestart = read_tc_c0_tcrestart(); - /* If TCRestart indicates a WAIT instruction, advance the PC */ - if ((tcrestart & 0x80000000) - && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) { - tcrestart += 4; - } - /* - * Save on TC's future kernel stack - * - * CU bit of Status is indicator that TC was - * already running on a kernel stack... 
- */ - if (tcstatus & ST0_CU0) { - /* Note that this "- 1" is pointer arithmetic */ - kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; - } else { - kstack = ((struct pt_regs *)kernelsp[cpu]) - 1; - } - - kstack->cp0_epc = (long)tcrestart; - /* Save TCStatus */ - kstack->cp0_tcstatus = tcstatus; - /* Pass token of operation to be performed kernel stack pad area */ - kstack->pad0[4] = (unsigned long)pipi; - /* Pass address of function to be called likewise */ - kstack->pad0[5] = (unsigned long)&ipi_decode; - /* Set interrupt exempt and kernel mode */ - tcstatus |= TCSTATUS_IXMT; - tcstatus &= ~TCSTATUS_TKSU; - write_tc_c0_tcstatus(tcstatus); - ehb(); - /* Set TC Restart address to be SMTC IPI vector */ - write_tc_c0_tcrestart(__smtc_ipi_vector); -} - -static void ipi_resched_interrupt(void) -{ - scheduler_ipi(); -} - -static void ipi_call_interrupt(void) -{ - /* Invoke generic function invocation code in smp.c */ - smp_call_function_interrupt(); -} - -DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); - -static void __irq_entry smtc_clock_tick_interrupt(void) -{ - unsigned int cpu = smp_processor_id(); - struct clock_event_device *cd; - int irq = MIPS_CPU_IRQ_BASE + 1; - - irq_enter(); - kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); - cd = &per_cpu(mips_clockevent_device, cpu); - cd->event_handler(cd); - irq_exit(); -} - -void ipi_decode(struct smtc_ipi *pipi) -{ - void *arg_copy = pipi->arg; - int type_copy = pipi->type; - - smtc_ipi_nq(&freeIPIq, pipi); - - switch (type_copy) { - case SMTC_CLOCK_TICK: - smtc_clock_tick_interrupt(); - break; - - case LINUX_SMP_IPI: - switch ((int)arg_copy) { - case SMP_RESCHEDULE_YOURSELF: - ipi_resched_interrupt(); - break; - case SMP_CALL_FUNCTION: - ipi_call_interrupt(); - break; - default: - printk("Impossible SMTC IPI Argument %p\n", arg_copy); - break; - } - break; -#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF - case IRQ_AFFINITY_IPI: - /* - * Accept a "forwarded" interrupt that was initially - * taken by a TC who doesn't have affinity for the IRQ. - */ - do_IRQ_no_affinity((int)arg_copy); - break; -#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ - default: - printk("Impossible SMTC IPI Type 0x%x\n", type_copy); - break; - } -} - -/* - * Similar to smtc_ipi_replay(), but invoked from context restore, - * so it reuses the current exception frame rather than set up a - * new one with self_ipi. - */ - -void deferred_smtc_ipi(void) -{ - int cpu = smp_processor_id(); - - /* - * Test is not atomic, but much faster than a dequeue, - * and the vast majority of invocations will have a null queue. - * If irq_disabled when this was called, then any IPIs queued - * after we test last will be taken on the next irq_enable/restore. - * If interrupts were enabled, then any IPIs added after the - * last test will be taken directly. - */ - - while (IPIQ[cpu].head != NULL) { - struct smtc_ipi_q *q = &IPIQ[cpu]; - struct smtc_ipi *pipi; - unsigned long flags; - - /* - * It may be possible we'll come in with interrupts - * already enabled. - */ - local_irq_save(flags); - spin_lock(&q->lock); - pipi = __smtc_ipi_dq(q); - spin_unlock(&q->lock); - if (pipi != NULL) { - if (pipi->type == LINUX_SMP_IPI && - (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) - IPIQ[cpu].resched_flag = 0; - ipi_decode(pipi); - } - /* - * The use of the __raw_local restore isn't - * as obviously necessary here as in smtc_ipi_replay(), - * but it's more efficient, given that we're already - * running down the IPI queue. 
- */ - __arch_local_irq_restore(flags); - } -} - -/* - * Cross-VPE interrupts in the SMTC prototype use "software interrupts" - * set via cross-VPE MTTR manipulation of the Cause register. It would be - * in some regards preferable to have external logic for "doorbell" hardware - * interrupts. - */ - -static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ; - -static irqreturn_t ipi_interrupt(int irq, void *dev_idm) -{ - int my_vpe = cpu_data[smp_processor_id()].vpe_id; - int my_tc = cpu_data[smp_processor_id()].tc_id; - int cpu; - struct smtc_ipi *pipi; - unsigned long tcstatus; - int sent; - unsigned long flags; - unsigned int mtflags; - unsigned int vpflags; - - /* - * So long as cross-VPE interrupts are done via - * MFTR/MTTR read-modify-writes of Cause, we need - * to stop other VPEs whenever the local VPE does - * anything similar. - */ - local_irq_save(flags); - vpflags = dvpe(); - clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ); - set_c0_status(0x100 << MIPS_CPU_IPI_IRQ); - irq_enable_hazard(); - evpe(vpflags); - local_irq_restore(flags); - - /* - * Cross-VPE Interrupt handler: Try to directly deliver IPIs - * queued for TCs on this VPE other than the current one. - * Return-from-interrupt should cause us to drain the queue - * for the current TC, so we ought not to have to do it explicitly here. - */ - - for_each_online_cpu(cpu) { - if (cpu_data[cpu].vpe_id != my_vpe) - continue; - - pipi = smtc_ipi_dq(&IPIQ[cpu]); - if (pipi != NULL) { - if (cpu_data[cpu].tc_id != my_tc) { - sent = 0; - LOCK_MT_PRA(); - settc(cpu_data[cpu].tc_id); - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - tcstatus = read_tc_c0_tcstatus(); - if ((tcstatus & TCSTATUS_IXMT) == 0) { - post_direct_ipi(cpu, pipi); - sent = 1; - } - write_tc_c0_tchalt(0); - UNLOCK_MT_PRA(); - if (!sent) { - smtc_ipi_req(&IPIQ[cpu], pipi); - } - } else { - /* - * ipi_decode() should be called - * with interrupts off - */ - local_irq_save(flags); - if (pipi->type == LINUX_SMP_IPI && - (int)pipi->arg == SMP_RESCHEDULE_YOURSELF) - IPIQ[cpu].resched_flag = 0; - ipi_decode(pipi); - local_irq_restore(flags); - } - } - } - - return IRQ_HANDLED; -} - -static void ipi_irq_dispatch(void) -{ - do_IRQ(cpu_ipi_irq); -} - -static struct irqaction irq_ipi = { - .handler = ipi_interrupt, - .flags = IRQF_PERCPU, - .name = "SMTC_IPI" -}; - -static void setup_cross_vpe_interrupts(unsigned int nvpe) -{ - if (nvpe < 1) - return; - - if (!cpu_has_vint) - panic("SMTC Kernel requires Vectored Interrupt support"); - - set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch); - - setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); - - irq_set_handler(cpu_ipi_irq, handle_percpu_irq); -} - -/* - * SMTC-specific hacks invoked from elsewhere in the kernel. - */ - - /* - * smtc_ipi_replay is called from raw_local_irq_restore - */ - -void smtc_ipi_replay(void) -{ - unsigned int cpu = smp_processor_id(); - - /* - * To the extent that we've ever turned interrupts off, - * we may have accumulated deferred IPIs. This is subtle. - * we should be OK: If we pick up something and dispatch - * it here, that's great. If we see nothing, but concurrent - * with this operation, another TC sends us an IPI, IXMT - * is clear, and we'll handle it as a real pseudo-interrupt - * and not a pseudo-pseudo interrupt. The important thing - * is to do the last check for queued message *after* the - * re-enabling of interrupts. 
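
The subtlety called out above comes down to the drain loop's shape: the queue head is re-tested on every iteration, so a message queued concurrently by another TC is still seen before the loop exits, and the dequeued message is processed outside the lock. A userspace analog of that pattern, assuming pthreads; the struct and function names are illustrative:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct ipi_msg { struct ipi_msg *next; int type; };

struct ipi_queue {
	pthread_mutex_t lock;
	struct ipi_msg *head;
};

/*
 * Drain pattern of deferred_smtc_ipi()/smtc_ipi_replay(): re-test
 * 'head' every pass so a message queued after the last dequeue is
 * still picked up before the loop is left.
 */
static void replay_deferred(struct ipi_queue *q,
			    void (*decode)(struct ipi_msg *))
{
	for (;;) {
		struct ipi_msg *m;

		pthread_mutex_lock(&q->lock);
		m = q->head;
		if (m)
			q->head = m->next;
		pthread_mutex_unlock(&q->lock);

		if (!m)
			break;		/* queue drained */
		decode(m);		/* process outside the lock */
	}
}

static void print_type(struct ipi_msg *m)
{
	printf("type %d\n", m->type);
}

int main(void)
{
	struct ipi_msg m2 = { NULL, 2 }, m1 = { &m2, 1 };
	struct ipi_queue q = { PTHREAD_MUTEX_INITIALIZER, &m1 };

	replay_deferred(&q, print_type);
	return 0;
}
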
- */ - while (IPIQ[cpu].head != NULL) { - struct smtc_ipi_q *q = &IPIQ[cpu]; - struct smtc_ipi *pipi; - unsigned long flags; - - /* - * It's just possible we'll come in with interrupts - * already enabled. - */ - local_irq_save(flags); - - spin_lock(&q->lock); - pipi = __smtc_ipi_dq(q); - spin_unlock(&q->lock); - /* - ** But use a raw restore here to avoid recursion. - */ - __arch_local_irq_restore(flags); - - if (pipi) { - self_ipi(pipi); - smtc_cpu_stats[cpu].selfipis++; - } - } -} - -EXPORT_SYMBOL(smtc_ipi_replay); - -void smtc_idle_loop_hook(void) -{ -#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG - int im; - int flags; - int mtflags; - int bit; - int vpe; - int tc; - int hook_ntcs; - /* - * printk within DMT-protected regions can deadlock, - * so buffer diagnostic messages for later output. - */ - char *pdb_msg; - char id_ho_db_msg[768]; /* worst-case use should be less than 700 */ - - if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */ - if (atomic_add_return(1, &idle_hook_initialized) == 1) { - int mvpconf0; - /* Tedious stuff to just do once */ - mvpconf0 = read_c0_mvpconf0(); - hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - if (hook_ntcs > NR_CPUS) - hook_ntcs = NR_CPUS; - for (tc = 0; tc < hook_ntcs; tc++) { - tcnoprog[tc] = 0; - clock_hang_reported[tc] = 0; - } - for (vpe = 0; vpe < 2; vpe++) - for (im = 0; im < 8; im++) - imstuckcount[vpe][im] = 0; - printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs); - atomic_set(&idle_hook_initialized, 1000); - } else { - /* Someone else is initializing in parallel - let 'em finish */ - while (atomic_read(&idle_hook_initialized) < 1000) - ; - } - } - - /* Have we stupidly left IXMT set somewhere? */ - if (read_c0_tcstatus() & 0x400) { - write_c0_tcstatus(read_c0_tcstatus() & ~0x400); - ehb(); - printk("Dangling IXMT in cpu_idle()\n"); - } - - /* Have we stupidly left an IM bit turned off? */ -#define IM_LIMIT 2000 - local_irq_save(flags); - mtflags = dmt(); - pdb_msg = &id_ho_db_msg[0]; - im = read_c0_status(); - vpe = current_cpu_data.vpe_id; - for (bit = 0; bit < 8; bit++) { - /* - * In current prototype, I/O interrupts - * are masked for VPE > 0 - */ - if (vpemask[vpe][bit]) { - if (!(im & (0x100 << bit))) - imstuckcount[vpe][bit]++; - else - imstuckcount[vpe][bit] = 0; - if (imstuckcount[vpe][bit] > IM_LIMIT) { - set_c0_status(0x100 << bit); - ehb(); - imstuckcount[vpe][bit] = 0; - pdb_msg += sprintf(pdb_msg, - "Dangling IM %d fixed for VPE %d\n", bit, - vpe); - } - } - } - - emt(mtflags); - local_irq_restore(flags); - if (pdb_msg != &id_ho_db_msg[0]) - printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); -#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ - - smtc_ipi_replay(); -} - -void smtc_soft_dump(void) -{ - int i; - - printk("Counter Interrupts taken per CPU (TC)\n"); - for (i=0; i < NR_CPUS; i++) { - printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints); - } - printk("Self-IPI invocations:\n"); - for (i=0; i < NR_CPUS; i++) { - printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); - } - smtc_ipi_qdump(); - printk("%d Recoveries of \"stolen\" FPU\n", - atomic_read(&smtc_fpu_recoveries)); -} - - -/* - * TLB management routines special to SMTC - */ - -void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) -{ - unsigned long flags, mtflags, tcstat, prevhalt, asid; - int tlb, i; - - /* - * It would be nice to be able to use a spinlock here, - * but this is invoked from within TLB flush routines - * that protect themselves with DVPE, so if a lock is - * held by another TC, it'll never be freed. 
- * - * DVPE/DMT must not be done with interrupts enabled, - * so even so most callers will already have disabled - * them, let's be really careful... - */ - - local_irq_save(flags); - if (smtc_status & SMTC_TLB_SHARED) { - mtflags = dvpe(); - tlb = 0; - } else { - mtflags = dmt(); - tlb = cpu_data[cpu].vpe_id; - } - asid = asid_cache(cpu); - - do { - if (!((asid += ASID_INC) & ASID_MASK) ) { - if (cpu_has_vtag_icache) - flush_icache_all(); - /* Traverse all online CPUs (hack requires contiguous range) */ - for_each_online_cpu(i) { - /* - * We don't need to worry about our own CPU, nor those of - * CPUs who don't share our TLB. - */ - if ((i != smp_processor_id()) && - ((smtc_status & SMTC_TLB_SHARED) || - (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) { - settc(cpu_data[i].tc_id); - prevhalt = read_tc_c0_tchalt() & TCHALT_H; - if (!prevhalt) { - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - } - tcstat = read_tc_c0_tcstatus(); - smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); - if (!prevhalt) - write_tc_c0_tchalt(0); - } - } - if (!asid) /* fix version if needed */ - asid = ASID_FIRST_VERSION; - local_flush_tlb_all(); /* start new asid cycle */ - } - } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); - - /* - * SMTC shares the TLB within VPEs and possibly across all VPEs. - */ - for_each_online_cpu(i) { - if ((smtc_status & SMTC_TLB_SHARED) || - (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) - cpu_context(i, mm) = asid_cache(i) = asid; - } - - if (smtc_status & SMTC_TLB_SHARED) - evpe(mtflags); - else - emt(mtflags); - local_irq_restore(flags); -} - -/* - * Invoked from macros defined in mmu_context.h - * which must already have disabled interrupts - * and done a DVPE or DMT as appropriate. - */ - -void smtc_flush_tlb_asid(unsigned long asid) -{ - int entry; - unsigned long ehi; - - entry = read_c0_wired(); - - /* Traverse all non-wired entries */ - while (entry < current_cpu_data.tlbsize) { - write_c0_index(entry); - ehb(); - tlb_read(); - ehb(); - ehi = read_c0_entryhi(); - if ((ehi & ASID_MASK) == asid) { - /* - * Invalidate only entries with specified ASID, - * makiing sure all entries differ. - */ - write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); - write_c0_entrylo0(0); - write_c0_entrylo1(0); - mtc0_tlbw_hazard(); - tlb_write_indexed(); - } - entry++; - } - write_c0_index(PARKED_INDEX); - tlbw_use_hazard(); -} - -/* - * Support for single-threading cache flush operations. - */ - -static int halt_state_save[NR_CPUS]; - -/* - * To really, really be sure that nothing is being done - * by other TCs, halt them all. This code assumes that - * a DVPE has already been done, so while their Halted - * state is theoretically architecturally unstable, in - * practice, it's not going to change while we're looking - * at it. - */ - -void smtc_cflush_lockdown(void) -{ - int cpu; - - for_each_online_cpu(cpu) { - if (cpu != smp_processor_id()) { - settc(cpu_data[cpu].tc_id); - halt_state_save[cpu] = read_tc_c0_tchalt(); - write_tc_c0_tchalt(TCHALT_H); - } - } - mips_ihb(); -} - -/* It would be cheating to change the cpu_online states during a flush! */ - -void smtc_cflush_release(void) -{ - int cpu; - - /* - * Start with a hazard barrier to ensure - * that all CACHE ops have played through. 
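
The asid_cache manipulation in smtc_get_new_mmu_context() above follows the classic versioned-ASID scheme: the low bits are the hardware ASID, the high bits a generation counter, and a wrap of the low bits forces a full TLB flush. A simplified standalone sketch that deliberately ignores the SMTC-specific live-ASID scan; the constants are chosen for illustration only:

#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	((1UL << ASID_BITS) - 1)
#define ASID_INC	1UL
#define ASID_FIRST_VERSION (1UL << ASID_BITS)

/*
 * When the hardware ASID wraps to zero, flush the TLB and carry the
 * generation; if the whole counter overflowed to 0, restart at the
 * first valid version.
 */
static unsigned long next_asid(unsigned long cache)
{
	cache += ASID_INC;
	if (!(cache & ASID_MASK)) {	/* hardware ASID wrapped */
		/* local_flush_tlb_all() would go here */
		if (!cache)		/* full counter overflow */
			cache = ASID_FIRST_VERSION;
	}
	return cache;
}

int main(void)
{
	unsigned long cache = ASID_MASK;	/* about to wrap */

	printf("next asid_cache = 0x%lx\n", next_asid(cache));
	return 0;
}
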
- */ - mips_ihb(); - - for_each_online_cpu(cpu) { - if (cpu != smp_processor_id()) { - settc(cpu_data[cpu].tc_id); - write_tc_c0_tchalt(halt_state_save[cpu]); - } - } - mips_ihb(); -} diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 93f86817f20a..67f2495def1c 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -8,7 +8,6 @@ * * Copyright (C) 2007, 2008 MIPS Technologies, Inc. */ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/stddef.h> @@ -198,14 +197,17 @@ static void probe_spram(char *type, } void spram_config(void) { - struct cpuinfo_mips *c = ¤t_cpu_data; unsigned int config0; - switch (c->cputype) { + switch (current_cpu_type()) { case CPU_24K: case CPU_34K: case CPU_74K: case CPU_1004K: + case CPU_1074K: + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + case CPU_P5600: config0 = read_c0_config(); /* FIXME: addresses are Malta specific */ if (config0 & (1<<24)) { diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index 84536bf4a154..2242bdd4370e 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -6,12 +6,9 @@ * not have done anything significant (but they may have had interrupts * enabled briefly - prom_smp_finish() should not be responsible for enabling * interrupts...) - * - * FIXME: broken for SMTC */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/irqflags.h> #include <linux/cpumask.h> @@ -34,14 +31,6 @@ void synchronise_count_master(int cpu) unsigned long flags; unsigned int initcount; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC needs to synchronise per VPE, not per CPU - * ignore for now - */ - return; -#endif - printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); local_irq_save(flags); @@ -111,14 +100,6 @@ void synchronise_count_slave(int cpu) int i; unsigned int initcount; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC needs to synchronise per VPE, not per CPU - * ignore for now - */ - return; -#endif - /* * Not every cpu is online at the time this gets called, * so we first wait for the master to say everyone is ready diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index b79d13f95bf0..4a4f9dda5658 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -110,7 +110,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) if (cpu_has_llsc && R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set mips3 \n" + " .set arch=r4000 \n" " li %[err], 0 \n" "1: ll %[old], (%[addr]) \n" " move %[tmp], %[new] \n" @@ -135,7 +135,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) : "memory"); } else if (cpu_has_llsc) { __asm__ __volatile__ ( - " .set mips3 \n" + " .set arch=r4000 \n" " li %[err], 0 \n" "1: ll %[old], (%[addr]) \n" " move %[tmp], %[new] \n" diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index dcb8e5d3bb8a..8d0170969e22 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -26,7 +26,6 @@ #include <asm/cpu-features.h> #include <asm/cpu-type.h> #include <asm/div64.h> -#include <asm/smtc_ipi.h> #include <asm/time.h> /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index f9c8746be8d6..51706d6dd5b0 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -10,10 +10,12 @@ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. 
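
For reference, the ll/sc sequence in the syscall.c hunk above (the sysmips MIPS_ATOMIC_SET path) is an atomic swap: store the new value and hand back the old one, branching back to 1b whenever the sc fails. In portable C11 that retry loop collapses into a single call; mips_atomic_set_like() below is an illustrative name, not the kernel interface:

#include <stdatomic.h>
#include <stdio.h>

static unsigned long mips_atomic_set_like(atomic_ulong *addr,
					  unsigned long new)
{
	/* atomic_exchange() hides the ll/sc (or cmpxchg) retry loop */
	return atomic_exchange(addr, new);
}

int main(void)
{
	atomic_ulong word = 0xdeadUL;
	unsigned long old = mips_atomic_set_like(&word, 0xbeefUL);

	printf("old=%lx new=%lx\n", old, atomic_load(&word));
	return 0;
}
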
+ * Copyright (C) 2014, Imagination Technologies Ltd. */ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/context_tracking.h> +#include <linux/cpu_pm.h> #include <linux/kexec.h> #include <linux/init.h> #include <linux/kernel.h> @@ -47,6 +49,7 @@ #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/module.h> +#include <asm/msa.h> #include <asm/pgtable.h> #include <asm/ptrace.h> #include <asm/sections.h> @@ -77,7 +80,10 @@ extern asmlinkage void handle_ri_rdhwr(void); extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); extern asmlinkage void handle_tr(void); +extern asmlinkage void handle_msa_fpe(void); extern asmlinkage void handle_fpe(void); +extern asmlinkage void handle_ftlb(void); +extern asmlinkage void handle_msa(void); extern asmlinkage void handle_mdmx(void); extern asmlinkage void handle_watch(void); extern asmlinkage void handle_mt(void); @@ -365,9 +371,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) { static int die_counter; int sig = SIGSEGV; -#ifdef CONFIG_MIPS_MT_SMTC - unsigned long dvpret; -#endif /* CONFIG_MIPS_MT_SMTC */ oops_enter(); @@ -377,13 +380,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) console_verbose(); raw_spin_lock_irq(&die_lock); -#ifdef CONFIG_MIPS_MT_SMTC - dvpret = dvpe(); -#endif /* CONFIG_MIPS_MT_SMTC */ bust_spinlocks(1); -#ifdef CONFIG_MIPS_MT_SMTC - mips_mt_regdump(dvpret); -#endif /* CONFIG_MIPS_MT_SMTC */ printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); @@ -707,10 +704,12 @@ int process_fpemu_return(int sig, void __user *fault_addr) si.si_addr = fault_addr; si.si_signo = sig; if (sig == SIGSEGV) { + down_read(¤t->mm->mmap_sem); if (find_vma(current->mm, (unsigned long)fault_addr)) si.si_code = SEGV_ACCERR; else si.si_code = SEGV_MAPERR; + up_read(¤t->mm->mmap_sem); } else { si.si_code = BUS_ADRERR; } @@ -860,6 +859,11 @@ asmlinkage void do_bp(struct pt_regs *regs) enum ctx_state prev_state; unsigned long epc; u16 instr[2]; + mm_segment_t seg; + + seg = get_fs(); + if (!user_mode(regs)) + set_fs(KERNEL_DS); prev_state = exception_enter(); if (get_isa16_mode(regs->cp0_epc)) { @@ -869,17 +873,19 @@ asmlinkage void do_bp(struct pt_regs *regs) if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) goto out_sigsegv; - opcode = (instr[0] << 16) | instr[1]; + opcode = (instr[0] << 16) | instr[1]; } else { - /* MIPS16e mode */ - if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) + /* MIPS16e mode */ + if (__get_user(instr[0], + (u16 __user *)msk_isa16_mode(epc))) goto out_sigsegv; - bcode = (instr[0] >> 6) & 0x3f; - do_trap_or_bp(regs, bcode, "Break"); - goto out; + bcode = (instr[0] >> 6) & 0x3f; + do_trap_or_bp(regs, bcode, "Break"); + goto out; } } else { - if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) + if (__get_user(opcode, + (unsigned int __user *) exception_epc(regs))) goto out_sigsegv; } @@ -917,6 +923,7 @@ asmlinkage void do_bp(struct pt_regs *regs) do_trap_or_bp(regs, bcode, "Break"); out: + set_fs(seg); exception_exit(prev_state); return; @@ -930,8 +937,13 @@ asmlinkage void do_tr(struct pt_regs *regs) u32 opcode, tcode = 0; enum ctx_state prev_state; u16 instr[2]; + mm_segment_t seg; unsigned long epc = msk_isa16_mode(exception_epc(regs)); + seg = get_fs(); + if (!user_mode(regs)) + set_fs(get_ds()); + prev_state = exception_enter(); if (get_isa16_mode(regs->cp0_epc)) { if (__get_user(instr[0], (u16 __user *)(epc + 0)) || @@ -952,6 
+964,7 @@ asmlinkage void do_tr(struct pt_regs *regs) do_trap_or_bp(regs, tcode, "Trap"); out: + set_fs(seg); exception_exit(prev_state); return; @@ -1073,6 +1086,76 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, return NOTIFY_OK; } +static int enable_restore_fp_context(int msa) +{ + int err, was_fpu_owner; + + if (!used_math()) { + /* First time FP context user. */ + err = init_fpu(); + if (msa && !err) + enable_msa(); + if (!err) + set_used_math(); + return err; + } + + /* + * This task has formerly used the FP context. + * + * If this thread has no live MSA vector context then we can simply + * restore the scalar FP context. If it has live MSA vector context + * (that is, it has or may have used MSA since last performing a + * function call) then we'll need to restore the vector context. This + * applies even if we're currently only executing a scalar FP + * instruction. This is because if we were to later execute an MSA + * instruction then we'd either have to: + * + * - Restore the vector context & clobber any registers modified by + * scalar FP instructions between now & then. + * + * or + * + * - Not restore the vector context & lose the most significant bits + * of all vector registers. + * + * Neither of those options is acceptable. We cannot restore the least + * significant bits of the registers now & only restore the most + * significant bits later because the most significant bits of any + * vector registers whose aliased FP register is modified now will have + * been zeroed. We'd have no way to know that when restoring the vector + * context & thus may load an outdated value for the most significant + * bits of a vector register. + */ + if (!msa && !thread_msa_context_live()) + return own_fpu(1); + + /* + * This task is using or has previously used MSA. Thus we require + * that Status.FR == 1. + */ + was_fpu_owner = is_fpu_owner(); + err = own_fpu(0); + if (err) + return err; + + enable_msa(); + write_msa_csr(current->thread.fpu.msacsr); + set_thread_flag(TIF_USEDMSA); + + /* + * If this is the first time that the task is using MSA and it has + * previously used scalar FP in this time slice then we already nave + * FP context which we shouldn't clobber. + */ + if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner) + return 0; + + /* We need to restore the vector context. */ + restore_msa(current); + return 0; +} + asmlinkage void do_cpu(struct pt_regs *regs) { enum ctx_state prev_state; @@ -1080,7 +1163,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) unsigned long old_epc, old31; unsigned int opcode; unsigned int cpid; - int status; + int status, err; unsigned long __maybe_unused flags; prev_state = exception_enter(); @@ -1152,20 +1235,15 @@ asmlinkage void do_cpu(struct pt_regs *regs) /* Fall through. */ case 1: - if (used_math()) /* Using the FPU again. */ - own_fpu(1); - else { /* First time FPU user. 
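
The long comment in enable_restore_fp_context() above boils down to a three-way decision. A hedged standalone sketch of just that decision tree, with the boolean parameters standing in for used_math(), the msa argument and thread_msa_context_live():

#include <stdbool.h>
#include <stdio.h>

enum fp_action { FP_INIT, FP_RESTORE_SCALAR, FP_RESTORE_VECTOR };

static enum fp_action fp_context_action(bool used_math, bool want_msa,
					bool msa_ctx_live)
{
	if (!used_math)
		return FP_INIT;		   /* first FP use: fresh context */
	if (!want_msa && !msa_ctx_live)
		return FP_RESTORE_SCALAR;  /* plain FPU ownership suffices */
	return FP_RESTORE_VECTOR;	   /* full-width vector regs needed */
}

int main(void)
{
	/* scalar FP fault, but vector state may be live: restore it all */
	printf("%d\n", fp_context_action(true, false, true));
	return 0;
}
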
*/ - init_fpu(); - set_used_math(); - } + err = enable_restore_fp_context(0); - if (!raw_cpu_has_fpu) { + if (!raw_cpu_has_fpu || err) { int sig; void __user *fault_addr = NULL; sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, &fault_addr); - if (!process_fpemu_return(sig, fault_addr)) + if (!process_fpemu_return(sig, fault_addr) && !err) mt_ase_fp_affinity(); } @@ -1182,6 +1260,37 @@ out: exception_exit(prev_state); } +asmlinkage void do_msa_fpe(struct pt_regs *regs) +{ + enum ctx_state prev_state; + + prev_state = exception_enter(); + die_if_kernel("do_msa_fpe invoked from kernel context!", regs); + force_sig(SIGFPE, current); + exception_exit(prev_state); +} + +asmlinkage void do_msa(struct pt_regs *regs) +{ + enum ctx_state prev_state; + int err; + + prev_state = exception_enter(); + + if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) { + force_sig(SIGILL, current); + goto out; + } + + die_if_kernel("do_msa invoked from kernel context!", regs); + + err = enable_restore_fp_context(1); + if (err) + force_sig(SIGILL, current); +out: + exception_exit(prev_state); +} + asmlinkage void do_mdmx(struct pt_regs *regs) { enum ctx_state prev_state; @@ -1336,6 +1445,10 @@ static inline void parity_protection_init(void) case CPU_34K: case CPU_74K: case CPU_1004K: + case CPU_1074K: + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + case CPU_P5600: { #define ERRCTL_PE 0x80000000 #define ERRCTL_L2P 0x00800000 @@ -1425,14 +1538,27 @@ asmlinkage void cache_parity_error(void) printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); - printk("Error bits: %s%s%s%s%s%s%s\n", - reg_val & (1<<29) ? "ED " : "", - reg_val & (1<<28) ? "ET " : "", - reg_val & (1<<26) ? "EE " : "", - reg_val & (1<<25) ? "EB " : "", - reg_val & (1<<24) ? "EI " : "", - reg_val & (1<<23) ? "E1 " : "", - reg_val & (1<<22) ? "E0 " : ""); + if (cpu_has_mips_r2 && + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { + pr_err("Error bits: %s%s%s%s%s%s%s%s\n", + reg_val & (1<<29) ? "ED " : "", + reg_val & (1<<28) ? "ET " : "", + reg_val & (1<<27) ? "ES " : "", + reg_val & (1<<26) ? "EE " : "", + reg_val & (1<<25) ? "EB " : "", + reg_val & (1<<24) ? "EI " : "", + reg_val & (1<<23) ? "E1 " : "", + reg_val & (1<<22) ? "E0 " : ""); + } else { + pr_err("Error bits: %s%s%s%s%s%s%s\n", + reg_val & (1<<29) ? "ED " : "", + reg_val & (1<<28) ? "ET " : "", + reg_val & (1<<26) ? "EE " : "", + reg_val & (1<<25) ? "EB " : "", + reg_val & (1<<24) ? "EI " : "", + reg_val & (1<<23) ? "E1 " : "", + reg_val & (1<<22) ? "E0 " : ""); + } printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) @@ -1446,6 +1572,34 @@ asmlinkage void cache_parity_error(void) panic("Can't handle the cache error!"); } +asmlinkage void do_ftlb(void) +{ + const int field = 2 * sizeof(unsigned long); + unsigned int reg_val; + + /* For the moment, report the problem and hang. */ + if (cpu_has_mips_r2 && + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { + pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", + read_c0_ecc()); + pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); + reg_val = read_c0_cacheerr(); + pr_err("c0_cacheerr == %08x\n", reg_val); + + if ((reg_val & 0xc0000000) == 0xc0000000) { + pr_err("Decoded c0_cacheerr: FTLB parity error\n"); + } else { + pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n", + reg_val & (1<<30) ? "secondary" : "primary", + reg_val & (1<<31) ? 
"data" : "insn"); + } + } else { + pr_err("FTLB error exception\n"); + } + /* Just print the cacheerr bits for now */ + cache_parity_error(); +} + /* * SDBBP EJTAG debug exception handler. * We skip the instruction and return to the next instruction. @@ -1599,19 +1753,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) extern char rollback_except_vec_vi; char *vec_start = using_rollback_handler() ? &rollback_except_vec_vi : &except_vec_vi; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * We need to provide the SMTC vectored interrupt handler - * not only with the address of the handler, but with the - * Status.IM bit to be masked before going there. - */ - extern char except_vec_vi_mori; -#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) - const int mori_offset = &except_vec_vi_mori - vec_start + 2; -#else - const int mori_offset = &except_vec_vi_mori - vec_start; -#endif -#endif /* CONFIG_MIPS_MT_SMTC */ #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) const int lui_offset = &except_vec_vi_lui - vec_start + 2; const int ori_offset = &except_vec_vi_ori - vec_start + 2; @@ -1635,12 +1776,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) #else handler_len); #endif -#ifdef CONFIG_MIPS_MT_SMTC - BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ - - h = (u16 *)(b + mori_offset); - *h = (0x100 << n); -#endif /* CONFIG_MIPS_MT_SMTC */ h = (u16 *)(b + lui_offset); *h = (handler >> 16) & 0xffff; h = (u16 *)(b + ori_offset); @@ -1705,32 +1840,16 @@ static int __init ulri_disable(char *s) } __setup("noulri", ulri_disable); -void per_cpu_trap_init(bool is_boot_cpu) +/* configure STATUS register */ +static void configure_status(void) { - unsigned int cpu = smp_processor_id(); - unsigned int status_set = ST0_CU0; - unsigned int hwrena = cpu_hwrena_impl_bits; -#ifdef CONFIG_MIPS_MT_SMTC - int secondaryTC = 0; - int bootTC = (cpu == 0); - - /* - * Only do per_cpu_trap_init() for first TC of Each VPE. - * Note that this hack assumes that the SMTC init code - * assigns TCs consecutively and in ascending order. - */ - - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && - ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) - secondaryTC = 1; -#endif /* CONFIG_MIPS_MT_SMTC */ - /* * Disable coprocessors and select 32-bit or 64-bit addressing * and the 16/32 or 32/32 FPR register model. Reset the BEV * flag that some firmware may have left set and the TS bit (for * IP27). Set XX for ISA IV code to work. 
*/ + unsigned int status_set = ST0_CU0; #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif @@ -1741,6 +1860,12 @@ void per_cpu_trap_init(bool is_boot_cpu) change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, status_set); +} + +/* configure HWRENA register */ +static void configure_hwrena(void) +{ + unsigned int hwrena = cpu_hwrena_impl_bits; if (cpu_has_mips_r2) hwrena |= 0x0000000f; @@ -1750,11 +1875,10 @@ void per_cpu_trap_init(bool is_boot_cpu) if (hwrena) write_c0_hwrena(hwrena); +} -#ifdef CONFIG_MIPS_MT_SMTC - if (!secondaryTC) { -#endif /* CONFIG_MIPS_MT_SMTC */ - +static void configure_exception_vector(void) +{ if (cpu_has_veic || cpu_has_vint) { unsigned long sr = set_c0_status(ST0_BEV); write_c0_ebase(ebase); @@ -1770,6 +1894,16 @@ void per_cpu_trap_init(bool is_boot_cpu) } else set_c0_cause(CAUSEF_IV); } +} + +void per_cpu_trap_init(bool is_boot_cpu) +{ + unsigned int cpu = smp_processor_id(); + + configure_status(); + configure_hwrena(); + + configure_exception_vector(); /* * Before R2 both interrupt numbers were fixed to 7, so on R2 only: @@ -1789,10 +1923,6 @@ void per_cpu_trap_init(bool is_boot_cpu) cp0_perfcount_irq = -1; } -#ifdef CONFIG_MIPS_MT_SMTC - } -#endif /* CONFIG_MIPS_MT_SMTC */ - if (!cpu_data[cpu].asid_cache) cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; @@ -1801,23 +1931,10 @@ void per_cpu_trap_init(bool is_boot_cpu) BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); -#ifdef CONFIG_MIPS_MT_SMTC - if (bootTC) { -#endif /* CONFIG_MIPS_MT_SMTC */ /* Boot CPU's cache setup in setup_arch(). */ if (!is_boot_cpu) cpu_cache_init(); tlb_init(); -#ifdef CONFIG_MIPS_MT_SMTC - } else if (!secondaryTC) { - /* - * First TC in non-boot VPE must do subset of tlb_init() - * for MMU countrol registers. - */ - write_c0_pagemask(PM_DEFAULT_MASK); - write_c0_wired(0); - } -#endif /* CONFIG_MIPS_MT_SMTC */ TLBMISS_HANDLER_SETUP(); } @@ -1973,6 +2090,7 @@ void __init trap_init(void) set_except_vector(11, handle_cpu); set_except_vector(12, handle_ov); set_except_vector(13, handle_tr); + set_except_vector(14, handle_msa_fpe); if (current_cpu_type() == CPU_R6000 || current_cpu_type() == CPU_R6000A) { @@ -1995,6 +2113,8 @@ void __init trap_init(void) if (cpu_has_fpu && !cpu_has_nofpuex) set_except_vector(15, handle_fpe); + set_except_vector(16, handle_ftlb); + set_except_vector(21, handle_msa); set_except_vector(22, handle_mdmx); if (cpu_has_mcheck) @@ -2022,3 +2142,32 @@ void __init trap_init(void) cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ } + +static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + switch (cmd) { + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + configure_status(); + configure_hwrena(); + configure_exception_vector(); + + /* Restore register with CPU number for TLB handlers */ + TLBMISS_HANDLER_RESTORE(); + + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block trap_pm_notifier_block = { + .notifier_call = trap_pm_notifier, +}; + +static int __init trap_pm_init(void) +{ + return cpu_pm_register_notifier(&trap_pm_notifier_block); +} +arch_initcall(trap_pm_init); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index c369a5d35527..2b3517214d6d 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -7,6 +7,7 @@ * * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2014 Imagination Technologies Ltd. 
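
The trap_pm_notifier() added to traps.c above encodes a general CPU-PM rule: any register state the core can lose in a power-gated idle state is re-applied on every PM exit, and also after a failed entry, since the state is equally suspect then. A userspace analog of that callback shape; the function names mirror the kernel ones for readability only:

#include <stdio.h>

enum pm_event { PM_ENTER, PM_ENTER_FAILED, PM_EXIT };

static void configure_status(void)  { puts("restore Status"); }
static void configure_hwrena(void)  { puts("restore HWREna"); }
static void configure_vectors(void) { puts("restore EBase/vectors"); }

static void trap_pm_callback(enum pm_event e)
{
	switch (e) {
	case PM_ENTER_FAILED:
	case PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_vectors();
		break;
	default:
		break;	/* nothing to do on PM_ENTER for these regs */
	}
}

int main(void)
{
	trap_pm_callback(PM_EXIT);
	return 0;
}
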
* * This file contains exception handler for address error exception with the * special capability to execute faulting instructions in software. The @@ -110,8 +111,8 @@ extern void show_registers(struct pt_regs *regs); #ifdef __BIG_ENDIAN #define LoadHW(addr, value, res) \ __asm__ __volatile__ (".set\tnoat\n" \ - "1:\tlb\t%0, 0(%2)\n" \ - "2:\tlbu\t$1, 1(%2)\n\t" \ + "1:\t"user_lb("%0", "0(%2)")"\n" \ + "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -130,8 +131,8 @@ extern void show_registers(struct pt_regs *regs); #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ - "1:\tlwl\t%0, (%2)\n" \ - "2:\tlwr\t%0, 3(%2)\n\t" \ + "1:\t"user_lwl("%0", "(%2)")"\n" \ + "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -149,8 +150,8 @@ extern void show_registers(struct pt_regs *regs); #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\tlbu\t%0, 0(%2)\n" \ - "2:\tlbu\t$1, 1(%2)\n\t" \ + "1:\t"user_lbu("%0", "0(%2)")"\n" \ + "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -170,8 +171,8 @@ extern void show_registers(struct pt_regs *regs); #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ - "1:\tlwl\t%0, (%2)\n" \ - "2:\tlwr\t%0, 3(%2)\n\t" \ + "1:\t"user_lwl("%0", "(%2)")"\n" \ + "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ "dsll\t%0, %0, 32\n\t" \ "dsrl\t%0, %0, 32\n\t" \ "li\t%1, 0\n" \ @@ -209,9 +210,9 @@ extern void show_registers(struct pt_regs *regs); #define StoreHW(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\tsb\t%1, 1(%2)\n\t" \ + "1:\t"user_sb("%1", "1(%2)")"\n" \ "srl\t$1, %1, 0x8\n" \ - "2:\tsb\t$1, 0(%2)\n\t" \ + "2:\t"user_sb("$1", "0(%2)")"\n" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ @@ -229,8 +230,8 @@ extern void show_registers(struct pt_regs *regs); #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ - "1:\tswl\t%1,(%2)\n" \ - "2:\tswr\t%1, 3(%2)\n\t" \ + "1:\t"user_swl("%1", "(%2)")"\n" \ + "2:\t"user_swr("%1", "3(%2)")"\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -267,8 +268,8 @@ extern void show_registers(struct pt_regs *regs); #ifdef __LITTLE_ENDIAN #define LoadHW(addr, value, res) \ __asm__ __volatile__ (".set\tnoat\n" \ - "1:\tlb\t%0, 1(%2)\n" \ - "2:\tlbu\t$1, 0(%2)\n\t" \ + "1:\t"user_lb("%0", "1(%2)")"\n" \ + "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -287,8 +288,8 @@ extern void show_registers(struct pt_regs *regs); #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ - "1:\tlwl\t%0, 3(%2)\n" \ - "2:\tlwr\t%0, (%2)\n\t" \ + "1:\t"user_lwl("%0", "3(%2)")"\n" \ + "2:\t"user_lwr("%0", "(%2)")"\n\t" \ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -306,8 +307,8 @@ extern void show_registers(struct pt_regs *regs); #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\tlbu\t%0, 1(%2)\n" \ - "2:\tlbu\t$1, 0(%2)\n\t" \ + "1:\t"user_lbu("%0", "1(%2)")"\n" \ + "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -327,8 +328,8 @@ extern void show_registers(struct pt_regs *regs); #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ - "1:\tlwl\t%0, 3(%2)\n" \ - "2:\tlwr\t%0, (%2)\n\t" \ + "1:\t"user_lwl("%0", "3(%2)")"\n" \ + "2:\t"user_lwr("%0", "(%2)")"\n\t" \ "dsll\t%0, %0, 32\n\t" \ "dsrl\t%0, %0, 32\n\t" \ "li\t%1, 0\n" \ @@ -366,9 +367,9 @@ extern void show_registers(struct pt_regs *regs); #define StoreHW(addr, value, res) \ __asm__ 
__volatile__ ( \ ".set\tnoat\n" \ - "1:\tsb\t%1, 0(%2)\n\t" \ + "1:\t"user_sb("%1", "0(%2)")"\n" \ "srl\t$1,%1, 0x8\n" \ - "2:\tsb\t$1, 1(%2)\n\t" \ + "2:\t"user_sb("$1", "1(%2)")"\n" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ @@ -386,8 +387,8 @@ extern void show_registers(struct pt_regs *regs); #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ - "1:\tswl\t%1, 3(%2)\n" \ - "2:\tswr\t%1, (%2)\n\t" \ + "1:\t"user_swl("%1", "3(%2)")"\n" \ + "2:\t"user_swr("%1", "(%2)")"\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -430,7 +431,9 @@ static void emulate_load_store_insn(struct pt_regs *regs, unsigned long origpc; unsigned long orig31; void __user *fault_addr = NULL; - +#ifdef CONFIG_EVA + mm_segment_t seg; +#endif origpc = (unsigned long)pc; orig31 = regs->regs[31]; @@ -475,6 +478,88 @@ static void emulate_load_store_insn(struct pt_regs *regs, * The remaining opcodes are the ones that are really of * interest. */ +#ifdef CONFIG_EVA + case spec3_op: + /* + * we can land here only from kernel accessing user memory, + * so we need to "switch" the address limit to user space, so + * address check can work properly. + */ + seg = get_fs(); + set_fs(USER_DS); + switch (insn.spec3_format.func) { + case lhe_op: + if (!access_ok(VERIFY_READ, addr, 2)) { + set_fs(seg); + goto sigbus; + } + LoadHW(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + compute_return_epc(regs); + regs->regs[insn.spec3_format.rt] = value; + break; + case lwe_op: + if (!access_ok(VERIFY_READ, addr, 4)) { + set_fs(seg); + goto sigbus; + } + LoadW(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + compute_return_epc(regs); + regs->regs[insn.spec3_format.rt] = value; + break; + case lhue_op: + if (!access_ok(VERIFY_READ, addr, 2)) { + set_fs(seg); + goto sigbus; + } + LoadHWU(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + compute_return_epc(regs); + regs->regs[insn.spec3_format.rt] = value; + break; + case she_op: + if (!access_ok(VERIFY_WRITE, addr, 2)) { + set_fs(seg); + goto sigbus; + } + compute_return_epc(regs); + value = regs->regs[insn.spec3_format.rt]; + StoreHW(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + break; + case swe_op: + if (!access_ok(VERIFY_WRITE, addr, 4)) { + set_fs(seg); + goto sigbus; + } + compute_return_epc(regs); + value = regs->regs[insn.spec3_format.rt]; + StoreW(addr, value, res); + if (res) { + set_fs(seg); + goto fault; + } + break; + default: + set_fs(seg); + goto sigill; + } + set_fs(seg); + break; +#endif case lh_op: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c new file mode 100644 index 000000000000..9268ebc0f61e --- /dev/null +++ b/arch/mips/kernel/vpe-cmp.c @@ -0,0 +1,180 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd. 
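
The EVA cases added to emulate_load_store_insn() above all follow one discipline: save the current address limit, switch it for the emulated user access, and restore it on every exit path, error paths included. Reduced to its shape in standalone C; addr_limit is a stand-in for the segment state, not a real kernel variable:

#include <stdio.h>

static int addr_limit;			/* ~ the task's address limit */

static int emulate_access(int want_user, int fault)
{
	int seg = addr_limit;		/* seg = get_fs() */

	addr_limit = want_user;		/* set_fs(USER_DS) */
	if (fault) {
		addr_limit = seg;	/* restore before the error return */
		return -1;
	}
	addr_limit = seg;		/* set_fs(seg) on success too */
	return 0;
}

int main(void)
{
	printf("%d\n", emulate_access(1, 0));
	return 0;
}
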
+ */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/export.h> + +#include <asm/vpe.h> + +static int major; + +void cleanup_tc(struct tc *tc) +{ + +} + +static ssize_t store_kill(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + struct vpe_notifications *notifier; + + list_for_each_entry(notifier, &vpe->notify, list) + notifier->stop(aprp_cpu_index()); + + release_progmem(vpe->load_addr); + vpe->state = VPE_STATE_UNUSED; + + return len; +} +static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); + +static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, + char *buf) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + + return sprintf(buf, "%d\n", vpe->ntcs); +} + +static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + unsigned long new; + int ret; + + ret = kstrtoul(buf, 0, &new); + if (ret < 0) + return ret; + + /* APRP can only reserve one TC in a VPE and no more. */ + if (new != 1) + return -EINVAL; + + vpe->ntcs = new; + + return len; +} +static DEVICE_ATTR_RW(ntcs); + +static struct attribute *vpe_attrs[] = { + &dev_attr_kill.attr, + &dev_attr_ntcs.attr, + NULL, +}; +ATTRIBUTE_GROUPS(vpe); + +static void vpe_device_release(struct device *cd) +{ + kfree(cd); +} + +static struct class vpe_class = { + .name = "vpe", + .owner = THIS_MODULE, + .dev_release = vpe_device_release, + .dev_groups = vpe_groups, +}; + +static struct device vpe_device; + +int __init vpe_module_init(void) +{ + struct vpe *v = NULL; + struct tc *t; + int err; + + if (!cpu_has_mipsmt) { + pr_warn("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } + + if (num_possible_cpus() - aprp_cpu_index() < 1) { + pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n" + "Pass maxcpus=<n> argument as kernel argument\n"); + return -ENODEV; + } + + major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops); + if (major < 0) { + pr_warn("VPE loader: unable to register character device\n"); + return major; + } + + err = class_register(&vpe_class); + if (err) { + pr_err("vpe_class registration failed\n"); + goto out_chrdev; + } + + device_initialize(&vpe_device); + vpe_device.class = &vpe_class, + vpe_device.parent = NULL, + dev_set_name(&vpe_device, "vpe_sp"); + vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR); + err = device_add(&vpe_device); + if (err) { + pr_err("Adding vpe_device failed\n"); + goto out_class; + } + + t = alloc_tc(aprp_cpu_index()); + if (!t) { + pr_warn("VPE: unable to allocate TC\n"); + err = -ENOMEM; + goto out_dev; + } + + /* VPE */ + v = alloc_vpe(aprp_cpu_index()); + if (v == NULL) { + pr_warn("VPE: unable to allocate VPE\n"); + kfree(t); + err = -ENOMEM; + goto out_dev; + } + + v->ntcs = 1; + + /* add the tc to the list of this vpe's tc's. 
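
ntcs_store() above shows the usual sysfs store pattern: parse, validate the range, and only then commit to the device state. A standalone sketch with strtoul() standing in for kstrtoul() and the CMP-specific "exactly one TC" rule kept; ntcs_store_like() is an illustrative name:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int ntcs_store_like(const char *buf, unsigned long *ntcs)
{
	char *end;
	unsigned long new = strtoul(buf, &end, 0);

	if (end == buf)
		return -EINVAL;		/* no digits parsed */
	if (new != 1)
		return -EINVAL;		/* APRP on CMP reserves one TC only */
	*ntcs = new;			/* commit only after validation */
	return 0;
}

int main(void)
{
	unsigned long ntcs = 0;

	printf("%d (ntcs=%lu)\n", ntcs_store_like("1", &ntcs), ntcs);
	return 0;
}
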
*/ + list_add(&t->tc, &v->tc); + + /* TC */ + t->pvpe = v; /* set the parent vpe */ + + return 0; + +out_dev: + device_del(&vpe_device); + +out_class: + class_unregister(&vpe_class); + +out_chrdev: + unregister_chrdev(major, VPE_MODULE_NAME); + + return err; +} + +void __exit vpe_module_exit(void) +{ + struct vpe *v, *n; + + device_del(&vpe_device); + class_unregister(&vpe_class); + unregister_chrdev(major, VPE_MODULE_NAME); + + /* No locking needed here */ + list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) + if (v->state != VPE_STATE_UNUSED) + release_vpe(v); +} diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c new file mode 100644 index 000000000000..2e003b11a098 --- /dev/null +++ b/arch/mips/kernel/vpe-mt.c @@ -0,0 +1,521 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2013 Imagination Technologies Ltd. + */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/export.h> + +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> +#include <asm/mips_mt.h> +#include <asm/vpe.h> + +static int major; + +/* The number of TCs and VPEs physically available on the core */ +static int hw_tcs, hw_vpes; + +/* We are prepared so configure and start the VPE... */ +int vpe_run(struct vpe *v) +{ + unsigned long flags, val, dmt_flag; + struct vpe_notifications *notifier; + unsigned int vpeflags; + struct tc *t; + + /* check we are the Master VPE */ + local_irq_save(flags); + val = read_c0_vpeconf0(); + if (!(val & VPECONF0_MVP)) { + pr_warn("VPE loader: only Master VPE's are able to config MT\n"); + local_irq_restore(flags); + + return -1; + } + + dmt_flag = dmt(); + vpeflags = dvpe(); + + if (list_empty(&v->tc)) { + evpe(vpeflags); + emt(dmt_flag); + local_irq_restore(flags); + + pr_warn("VPE loader: No TC's associated with VPE %d\n", + v->minor); + + return -ENOEXEC; + } + + t = list_first_entry(&v->tc, struct tc, tc); + + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + settc(t->index); + + /* should check it is halted, and not activated */ + if ((read_tc_c0_tcstatus() & TCSTATUS_A) || + !(read_tc_c0_tchalt() & TCHALT_H)) { + evpe(vpeflags); + emt(dmt_flag); + local_irq_restore(flags); + + pr_warn("VPE loader: TC %d is already active!\n", + t->index); + + return -ENOEXEC; + } + + /* + * Write the address we want it to start running from in the TCPC + * register. + */ + write_tc_c0_tcrestart((unsigned long)v->__start); + write_tc_c0_tccontext((unsigned long)0); + + /* + * Mark the TC as activated, not interrupt exempt and not dynamically + * allocatable + */ + val = read_tc_c0_tcstatus(); + val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; + write_tc_c0_tcstatus(val); + + write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); + + /* + * The sde-kit passes 'memsize' to __start in $a3, so set something + * here... 
Or set $a3 to zero and define DFLT_STACK_SIZE and + * DFLT_HEAP_SIZE when you compile your program + */ + mttgpr(6, v->ntcs); + mttgpr(7, physical_memsize); + + /* set up VPE1 */ + /* + * bind the TC to VPE 1 as late as possible so we only have the final + * VPE registers to set up, and so an EJTAG probe can trigger on it + */ + write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); + + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); + + back_to_back_c0_hazard(); + + /* Set up the XTC bit in vpeconf0 to point at our tc */ + write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) + | (t->index << VPECONF0_XTC_SHIFT)); + + back_to_back_c0_hazard(); + + /* enable this VPE */ + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); + + /* clear out any left overs from a previous program */ + write_vpe_c0_status(0); + write_vpe_c0_cause(0); + + /* take system out of configuration state */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + + /* + * SMVP kernels manage VPE enable independently, but uniprocessor + * kernels need to turn it on, even if that wasn't the pre-dvpe() state. + */ +#ifdef CONFIG_SMP + evpe(vpeflags); +#else + evpe(EVPE_ENABLE); +#endif + emt(dmt_flag); + local_irq_restore(flags); + + list_for_each_entry(notifier, &v->notify, list) + notifier->start(VPE_MODULE_MINOR); + + return 0; +} + +void cleanup_tc(struct tc *tc) +{ + unsigned long flags; + unsigned int mtflags, vpflags; + int tmp; + + local_irq_save(flags); + mtflags = dmt(); + vpflags = dvpe(); + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + settc(tc->index); + tmp = read_tc_c0_tcstatus(); + + /* mark not allocated and not dynamically allocatable */ + tmp &= ~(TCSTATUS_A | TCSTATUS_DA); + tmp |= TCSTATUS_IXMT; /* interrupt exempt */ + write_tc_c0_tcstatus(tmp); + + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + + clear_c0_mvpcontrol(MVPCONTROL_VPC); + evpe(vpflags); + emt(mtflags); + local_irq_restore(flags); +} + +/* module wrapper entry points */ +/* give me a vpe */ +void *vpe_alloc(void) +{ + int i; + struct vpe *v; + + /* find a vpe */ + for (i = 1; i < MAX_VPES; i++) { + v = get_vpe(i); + if (v != NULL) { + v->state = VPE_STATE_INUSE; + return v; + } + } + return NULL; +} +EXPORT_SYMBOL(vpe_alloc); + +/* start running from here */ +int vpe_start(void *vpe, unsigned long start) +{ + struct vpe *v = vpe; + + v->__start = start; + return vpe_run(v); +} +EXPORT_SYMBOL(vpe_start); + +/* halt it for now */ +int vpe_stop(void *vpe) +{ + struct vpe *v = vpe; + struct tc *t; + unsigned int evpe_flags; + + evpe_flags = dvpe(); + + t = list_entry(v->tc.next, struct tc, tc); + if (t != NULL) { + settc(t->index); + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); + } + + evpe(evpe_flags); + + return 0; +} +EXPORT_SYMBOL(vpe_stop); + +/* I've done with it thank you */ +int vpe_free(void *vpe) +{ + struct vpe *v = vpe; + struct tc *t; + unsigned int evpe_flags; + + t = list_entry(v->tc.next, struct tc, tc); + if (t == NULL) + return -ENOEXEC; + + evpe_flags = dvpe(); + + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + settc(t->index); + write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); + + /* halt the TC */ + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + + /* mark the TC unallocated */ + write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); + + v->state = VPE_STATE_UNUSED; + + clear_c0_mvpcontrol(MVPCONTROL_VPC); + evpe(evpe_flags); + + return 0; +} +EXPORT_SYMBOL(vpe_free); + +static 
ssize_t store_kill(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + struct vpe_notifications *notifier; + + list_for_each_entry(notifier, &vpe->notify, list) + notifier->stop(aprp_cpu_index()); + + release_progmem(vpe->load_addr); + cleanup_tc(get_tc(aprp_cpu_index())); + vpe_stop(vpe); + vpe_free(vpe); + + return len; +} +static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); + +static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, + char *buf) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + + return sprintf(buf, "%d\n", vpe->ntcs); +} + +static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct vpe *vpe = get_vpe(aprp_cpu_index()); + unsigned long new; + int ret; + + ret = kstrtoul(buf, 0, &new); + if (ret < 0) + return ret; + + if (new == 0 || new > (hw_tcs - aprp_cpu_index())) + return -EINVAL; + + vpe->ntcs = new; + + return len; +} +static DEVICE_ATTR_RW(ntcs); + +static struct attribute *vpe_attrs[] = { + &dev_attr_kill.attr, + &dev_attr_ntcs.attr, + NULL, +}; +ATTRIBUTE_GROUPS(vpe); + +static void vpe_device_release(struct device *cd) +{ + kfree(cd); +} + +static struct class vpe_class = { + .name = "vpe", + .owner = THIS_MODULE, + .dev_release = vpe_device_release, + .dev_groups = vpe_groups, +}; + +static struct device vpe_device; + +int __init vpe_module_init(void) +{ + unsigned int mtflags, vpflags; + unsigned long flags, val; + struct vpe *v = NULL; + struct tc *t; + int tc, err; + + if (!cpu_has_mipsmt) { + pr_warn("VPE loader: not a MIPS MT capable processor\n"); + return -ENODEV; + } + + if (vpelimit == 0) { + pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n" + "Pass maxvpes=<n> argument as kernel argument\n"); + + return -ENODEV; + } + + if (aprp_cpu_index() == 0) { + pr_warn("No TCs reserved for AP/SP, not initialize VPE loader\n" + "Pass maxtcs=<n> argument as kernel argument\n"); + + return -ENODEV; + } + + major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops); + if (major < 0) { + pr_warn("VPE loader: unable to register character device\n"); + return major; + } + + err = class_register(&vpe_class); + if (err) { + pr_err("vpe_class registration failed\n"); + goto out_chrdev; + } + + device_initialize(&vpe_device); + vpe_device.class = &vpe_class, + vpe_device.parent = NULL, + dev_set_name(&vpe_device, "vpe1"); + vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR); + err = device_add(&vpe_device); + if (err) { + pr_err("Adding vpe_device failed\n"); + goto out_class; + } + + local_irq_save(flags); + mtflags = dmt(); + vpflags = dvpe(); + + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); + + val = read_c0_mvpconf0(); + hw_tcs = (val & MVPCONF0_PTC) + 1; + hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; + + for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) { + /* + * Must re-enable multithreading temporarily or in case we + * reschedule send IPIs or similar we might hang. 
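
The comment above is the interesting constraint in this loop: the MT/VPE configuration state must be dropped before alloc_tc(), because the GFP_KERNEL allocation may sleep and nothing would run to service it. The same shape in userspace terms, with a pthread mutex standing in for the irq-save/dmt/dvpe critical section:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds cfg_lock; drop it across the blocking allocation and
 * retake it afterwards, exactly like the evpe/emt ... dmt/dvpe pair
 * around alloc_tc() above. */
static void *alloc_outside_critical_section(size_t n)
{
	void *p;

	pthread_mutex_unlock(&cfg_lock);	/* ~ evpe/emt/irq_restore */
	p = malloc(n);				/* may block */
	pthread_mutex_lock(&cfg_lock);		/* ~ irq_save/dmt/dvpe */
	return p;
}

int main(void)
{
	void *p;

	pthread_mutex_lock(&cfg_lock);
	p = alloc_outside_critical_section(64);
	pthread_mutex_unlock(&cfg_lock);
	free(p);
	return 0;
}
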
+ */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + evpe(vpflags); + emt(mtflags); + local_irq_restore(flags); + t = alloc_tc(tc); + if (!t) { + err = -ENOMEM; + goto out_dev; + } + + local_irq_save(flags); + mtflags = dmt(); + vpflags = dvpe(); + set_c0_mvpcontrol(MVPCONTROL_VPC); + + /* VPE's */ + if (tc < hw_tcs) { + settc(tc); + + v = alloc_vpe(tc); + if (v == NULL) { + pr_warn("VPE: unable to allocate VPE\n"); + goto out_reenable; + } + + v->ntcs = hw_tcs - aprp_cpu_index(); + + /* add the tc to the list of this vpe's tc's. */ + list_add(&t->tc, &v->tc); + + /* deactivate all but vpe0 */ + if (tc >= aprp_cpu_index()) { + unsigned long tmp = read_vpe_c0_vpeconf0(); + + tmp &= ~VPECONF0_VPA; + + /* master VPE */ + tmp |= VPECONF0_MVP; + write_vpe_c0_vpeconf0(tmp); + } + + /* disable multi-threading with TC's */ + write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & + ~VPECONTROL_TE); + + if (tc >= vpelimit) { + /* + * Set config to be the same as vpe0, + * particularly kseg0 coherency alg + */ + write_vpe_c0_config(read_c0_config()); + } + } + + /* TC's */ + t->pvpe = v; /* set the parent vpe */ + + if (tc >= aprp_cpu_index()) { + unsigned long tmp; + + settc(tc); + + /* + * A TC that is bound to any other VPE gets bound to + * VPE0, ideally I'd like to make it homeless but it + * doesn't appear to let me bind a TC to a non-existent + * VPE. Which is perfectly reasonable. + * + * The (un)bound state is visible to an EJTAG probe so + * may notify GDB... + */ + tmp = read_tc_c0_tcbind(); + if (tmp & TCBIND_CURVPE) { + /* tc is bound >vpe0 */ + write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE); + + t->pvpe = get_vpe(0); /* set the parent vpe */ + } + + /* halt the TC */ + write_tc_c0_tchalt(TCHALT_H); + mips_ihb(); + + tmp = read_tc_c0_tcstatus(); + + /* mark not activated and not dynamically allocatable */ + tmp &= ~(TCSTATUS_A | TCSTATUS_DA); + tmp |= TCSTATUS_IXMT; /* interrupt exempt */ + write_tc_c0_tcstatus(tmp); + } + } + +out_reenable: + /* release config state */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + + evpe(vpflags); + emt(mtflags); + local_irq_restore(flags); + + return 0; + +out_dev: + device_del(&vpe_device); + +out_class: + class_unregister(&vpe_class); + +out_chrdev: + unregister_chrdev(major, VPE_MODULE_NAME); + + return err; +} + +void __exit vpe_module_exit(void) +{ + struct vpe *v, *n; + + device_del(&vpe_device); + class_unregister(&vpe_class); + unregister_chrdev(major, VPE_MODULE_NAME); + + /* No locking needed here */ + list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { + if (v->state != VPE_STATE_UNUSED) + release_vpe(v); + } +} diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 59b2b3cd7885..11da314565cc 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -1,37 +1,22 @@ /* - * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
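
vpe_module_exit() above iterates with list_for_each_entry_safe() because release_vpe() frees the node under the cursor, so the next pointer must be saved before the free. The same idiom on a plain singly linked list:

#include <stdlib.h>

struct vpe { struct vpe *next; int state; };

static void release_all(struct vpe **head)
{
	struct vpe *v = *head;

	while (v) {
		struct vpe *n = v->next;	/* the "safe" saved pointer */
		free(v);			/* ~ release_vpe(v) */
		v = n;
	}
	*head = NULL;
}

int main(void)
{
	struct vpe *a = calloc(1, sizeof(*a));
	struct vpe *b = calloc(1, sizeof(*b));

	if (!a || !b) {
		free(a);
		free(b);
		return 1;
	}
	a->next = b;
	release_all(&a);
	return 0;
}
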
 *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-/*
- * VPE support module
- *
- * Provides support for loading a MIPS SP program on VPE1.
- * The SP environment is rather simple, no tlb's. It needs to be relocatable
- * (or partially linked). You should initialise your stack in the startup
- * code. This loader looks for the symbol __start and sets up
- * execution to resume from there. The MIPS SDE kit contains suitable examples.
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
 *
- * To load and run, simply cat a SP 'program file' to /dev/vpe1.
- * i.e cat spapp >/dev/vpe1.
+ * VPE support module for loading a MIPS SP program into VPE1. The SP
+ * environment is rather simple since there are no TLBs. It needs to be
+ * relocatable (or partially linked). Initialize your stack in the
+ * startup code. The loader looks for the symbol __start and sets up
+ * execution to resume from there. To load and run, simply cat an SP
+ * 'binary' to the /dev/vpe1 device.
 */
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/init.h>
-#include <asm/uaccess.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/vmalloc.h>
@@ -46,13 +31,10 @@
 #include <asm/mipsmtregs.h>
 #include <asm/cacheflush.h>
 #include <linux/atomic.h>
-#include <asm/cpu.h>
 #include <asm/mips_mt.h>
 #include <asm/processor.h>
 #include <asm/vpe.h>
 
-typedef void *vpe_handle;
-
 #ifndef ARCH_SHF_SMALL
 #define ARCH_SHF_SMALL 0
 #endif
@@ -60,96 +42,15 @@ typedef void *vpe_handle;
 /* If this is set, the section belongs in the init part of the module */
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 
-/*
- * The number of TCs and VPEs physically available on the core
- */
-static int hw_tcs, hw_vpes;
-static char module_name[] = "vpe";
-static int major;
-static const int minor = 1;	/* fixed for now */
-
-/* grab the likely amount of memory we will need.
*/ -#ifdef CONFIG_MIPS_VPE_LOADER_TOM -#define P_SIZE (2 * 1024 * 1024) -#else -/* add an overhead to the max kmalloc size for non-striped symbols/etc */ -#define P_SIZE (256 * 1024) -#endif - -extern unsigned long physical_memsize; - -#define MAX_VPES 16 -#define VPE_PATH_MAX 256 - -enum vpe_state { - VPE_STATE_UNUSED = 0, - VPE_STATE_INUSE, - VPE_STATE_RUNNING -}; - -enum tc_state { - TC_STATE_UNUSED = 0, - TC_STATE_INUSE, - TC_STATE_RUNNING, - TC_STATE_DYNAMIC -}; - -struct vpe { - enum vpe_state state; - - /* (device) minor associated with this vpe */ - int minor; - - /* elfloader stuff */ - void *load_addr; - unsigned long len; - char *pbuffer; - unsigned long plen; - unsigned int uid, gid; - char cwd[VPE_PATH_MAX]; - - unsigned long __start; - - /* tc's associated with this vpe */ - struct list_head tc; - - /* The list of vpe's */ - struct list_head list; - - /* shared symbol address */ - void *shared_ptr; - - /* the list of who wants to know when something major happens */ - struct list_head notify; - - unsigned int ntcs; -}; - -struct tc { - enum tc_state state; - int index; - - struct vpe *pvpe; /* parent VPE */ - struct list_head tc; /* The list of TC's with this VPE */ - struct list_head list; /* The global list of tc's */ -}; - -struct { - spinlock_t vpe_list_lock; - struct list_head vpe_list; /* Virtual processing elements */ - spinlock_t tc_list_lock; - struct list_head tc_list; /* Thread contexts */ -} vpecontrol = { +struct vpe_control vpecontrol = { .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) }; -static void release_progmem(void *ptr); - /* get the vpe associated with this minor */ -static struct vpe *get_vpe(int minor) +struct vpe *get_vpe(int minor) { struct vpe *res, *v; @@ -159,7 +60,7 @@ static struct vpe *get_vpe(int minor) res = NULL; spin_lock(&vpecontrol.vpe_list_lock); list_for_each_entry(v, &vpecontrol.vpe_list, list) { - if (v->minor == minor) { + if (v->minor == VPE_MODULE_MINOR) { res = v; break; } @@ -170,7 +71,7 @@ static struct vpe *get_vpe(int minor) } /* get the vpe associated with this minor */ -static struct tc *get_tc(int index) +struct tc *get_tc(int index) { struct tc *res, *t; @@ -188,12 +89,13 @@ static struct tc *get_tc(int index) } /* allocate a vpe and associate it with this minor (or index) */ -static struct vpe *alloc_vpe(int minor) +struct vpe *alloc_vpe(int minor) { struct vpe *v; - if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) - return NULL; + v = kzalloc(sizeof(struct vpe), GFP_KERNEL); + if (v == NULL) + goto out; INIT_LIST_HEAD(&v->tc); spin_lock(&vpecontrol.vpe_list_lock); @@ -201,17 +103,19 @@ static struct vpe *alloc_vpe(int minor) spin_unlock(&vpecontrol.vpe_list_lock); INIT_LIST_HEAD(&v->notify); - v->minor = minor; + v->minor = VPE_MODULE_MINOR; +out: return v; } /* allocate a tc. At startup only tc0 is running, all other can be halted. 
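 *
 * The other TCs are parked by the module init code; the recipe used
 * everywhere in this file to halt one is, in sketch form:
 *
 *	settc(index);
 *	write_tc_c0_tchalt(TCHALT_H);
 *	mips_ihb();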
*/ -static struct tc *alloc_tc(int index) +struct tc *alloc_tc(int index) { struct tc *tc; - if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) + tc = kzalloc(sizeof(struct tc), GFP_KERNEL); + if (tc == NULL) goto out; INIT_LIST_HEAD(&tc->tc); @@ -226,7 +130,7 @@ out: } /* clean up and free everything */ -static void release_vpe(struct vpe *v) +void release_vpe(struct vpe *v) { list_del(&v->list); if (v->load_addr) @@ -234,28 +138,8 @@ static void release_vpe(struct vpe *v) kfree(v); } -static void __maybe_unused dump_mtregs(void) -{ - unsigned long val; - - val = read_c0_config3(); - printk("config3 0x%lx MT %ld\n", val, - (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT); - - val = read_c0_mvpcontrol(); - printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val, - (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT, - (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT, - (val & MVPCONTROL_EVP)); - - val = read_c0_mvpconf0(); - printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val, - (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT, - val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT); -} - -/* Find some VPE program space */ -static void *alloc_progmem(unsigned long len) +/* Find some VPE program space */ +void *alloc_progmem(unsigned long len) { void *addr; @@ -274,7 +158,7 @@ static void *alloc_progmem(unsigned long len) return addr; } -static void release_progmem(void *ptr) +void release_progmem(void *ptr) { #ifndef CONFIG_MIPS_VPE_LOADER_TOM kfree(ptr); @@ -282,7 +166,7 @@ static void release_progmem(void *ptr) } /* Update size with this section: return offset. */ -static long get_offset(unsigned long *size, Elf_Shdr * sechdr) +static long get_offset(unsigned long *size, Elf_Shdr *sechdr) { long ret; @@ -295,8 +179,8 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr) might -- code, read-only data, read-write data, small data. Tally sizes, and place the offsets into sh_entsize fields: high bit means it belongs in init. */ -static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, - Elf_Shdr * sechdrs, const char *secstrings) +static void layout_sections(struct module *mod, const Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, const char *secstrings) { static unsigned long const masks[][2] = { /* NOTE: all executable code must be the first section @@ -316,7 +200,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, for (i = 0; i < hdr->e_shnum; ++i) { Elf_Shdr *s = &sechdrs[i]; - // || strncmp(secstrings + s->sh_name, ".init", 5) == 0) if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL) @@ -331,7 +214,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, } } - /* from module-elf32.c, but subverted a little */ struct mips_hi16 { @@ -354,20 +236,18 @@ static int apply_r_mips_gprel16(struct module *me, uint32_t *location, { int rel; - if( !(*location & 0xffff) ) { + if (!(*location & 0xffff)) { rel = (int)v - gp_addr; - } - else { + } else { /* .sbss + gp(relative) + offset */ /* kludge! 
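		 * .sbss data sits within a signed 16-bit window around gp,
		 * so the value is rebuilt from the offset already encoded
		 * in the instruction plus the freshly resolved symbol,
		 * roughly:
		 *
		 *	rel = (short)(v + gp_offs
		 *		      + (short)(*location & 0xffff) - gp_addr);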
*/ rel = (int)(short)((int)v + gp_offs + (int)(short)(*location & 0xffff) - gp_addr); } - if( (rel > 32768) || (rel < -32768) ) { - printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: " - "relative address 0x%x out of range of gp register\n", - rel); + if ((rel > 32768) || (rel < -32768)) { + pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n", + rel); return -ENOEXEC; } @@ -381,12 +261,12 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location, { int rel; rel = (((unsigned int)v - (unsigned int)location)); - rel >>= 2; // because the offset is in _instructions_ not bytes. - rel -= 1; // and one instruction less due to the branch delay slot. + rel >>= 2; /* because the offset is in _instructions_ not bytes. */ + rel -= 1; /* and one instruction less due to the branch delay slot. */ - if( (rel > 32768) || (rel < -32768) ) { - printk(KERN_DEBUG "VPE loader: " - "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); + if ((rel > 32768) || (rel < -32768)) { + pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n", + rel); return -ENOEXEC; } @@ -407,8 +287,7 @@ static int apply_r_mips_26(struct module *me, uint32_t *location, Elf32_Addr v) { if (v % 4) { - printk(KERN_DEBUG "VPE loader: apply_r_mips_26 " - " unaligned relocation\n"); + pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n"); return -ENOEXEC; } @@ -439,7 +318,7 @@ static int apply_r_mips_hi16(struct module *me, uint32_t *location, * the carry we need to add. Save the information, and let LO16 do the * actual relocation. */ - n = kmalloc(sizeof *n, GFP_KERNEL); + n = kmalloc(sizeof(*n), GFP_KERNEL); if (!n) return -ENOMEM; @@ -471,9 +350,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location, * The value for the HI16 had best be the same. */ if (v != l->value) { - printk(KERN_DEBUG "VPE loader: " - "apply_r_mips_lo16/hi16: \t" - "inconsistent value information\n"); + pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n"); goto out_free; } @@ -569,20 +446,19 @@ static int apply_relocations(Elf32_Shdr *sechdrs, + ELF32_R_SYM(r_info); if (!sym->st_value) { - printk(KERN_DEBUG "%s: undefined weak symbol %s\n", - me->name, strtab + sym->st_name); + pr_debug("%s: undefined weak symbol %s\n", + me->name, strtab + sym->st_name); /* just print the warning, dont barf */ } v = sym->st_value; res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); - if( res ) { + if (res) { char *r = rstrs[ELF32_R_TYPE(r_info)]; - printk(KERN_WARNING "VPE loader: .text+0x%x " - "relocation type %s for symbol \"%s\" failed\n", - rel[i].r_offset, r ? r : "UNKNOWN", - strtab + sym->st_name); + pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n", + rel[i].r_offset, r ? r : "UNKNOWN", + strtab + sym->st_name); return res; } } @@ -597,10 +473,8 @@ static inline void save_gp_address(unsigned int secbase, unsigned int rel) } /* end module-elf32.c */ - - /* Change all symbols so that sh_value encodes the pointer directly. 
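 *
 * Every defined symbol gains its section's load address, and the
 * special "_gp" symbol records the gp value that the GPREL16 handler
 * above consumes; the default case below amounts to:
 *
 *	secbase = sechdrs[sym[i].st_shndx].sh_addr;
 *	if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
 *		save_gp_address(secbase, sym[i].st_value);
 *	sym[i].st_value += secbase;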
*/ -static void simplify_symbols(Elf_Shdr * sechdrs, +static void simplify_symbols(Elf_Shdr *sechdrs, unsigned int symindex, const char *strtab, const char *secstrings, @@ -641,18 +515,16 @@ static void simplify_symbols(Elf_Shdr * sechdrs, break; case SHN_MIPS_SCOMMON: - printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON " - "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name, - sym[i].st_shndx); - // .sbss section + pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n", + strtab + sym[i].st_name, sym[i].st_shndx); + /* .sbss section */ break; default: secbase = sechdrs[sym[i].st_shndx].sh_addr; - if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) { + if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) save_gp_address(secbase, sym[i].st_value); - } sym[i].st_value += secbase; break; @@ -661,142 +533,21 @@ static void simplify_symbols(Elf_Shdr * sechdrs, } #ifdef DEBUG_ELFLOADER -static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, +static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex, const char *strtab, struct module *mod) { Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); - printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n); + pr_debug("dump_elfsymbols: n %d\n", n); for (i = 1; i < n; i++) { - printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i, - strtab + sym[i].st_name, sym[i].st_value); + pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name, + sym[i].st_value); } } #endif -/* We are prepared so configure and start the VPE... */ -static int vpe_run(struct vpe * v) -{ - unsigned long flags, val, dmt_flag; - struct vpe_notifications *n; - unsigned int vpeflags; - struct tc *t; - - /* check we are the Master VPE */ - local_irq_save(flags); - val = read_c0_vpeconf0(); - if (!(val & VPECONF0_MVP)) { - printk(KERN_WARNING - "VPE loader: only Master VPE's are allowed to configure MT\n"); - local_irq_restore(flags); - - return -1; - } - - dmt_flag = dmt(); - vpeflags = dvpe(); - - if (list_empty(&v->tc)) { - evpe(vpeflags); - emt(dmt_flag); - local_irq_restore(flags); - - printk(KERN_WARNING - "VPE loader: No TC's associated with VPE %d\n", - v->minor); - - return -ENOEXEC; - } - - t = list_first_entry(&v->tc, struct tc, tc); - - /* Put MVPE's into 'configuration state' */ - set_c0_mvpcontrol(MVPCONTROL_VPC); - - settc(t->index); - - /* should check it is halted, and not activated */ - if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { - evpe(vpeflags); - emt(dmt_flag); - local_irq_restore(flags); - - printk(KERN_WARNING "VPE loader: TC %d is already active!\n", - t->index); - - return -ENOEXEC; - } - - /* Write the address we want it to start running from in the TCPC register. */ - write_tc_c0_tcrestart((unsigned long)v->__start); - write_tc_c0_tccontext((unsigned long)0); - - /* - * Mark the TC as activated, not interrupt exempt and not dynamically - * allocatable - */ - val = read_tc_c0_tcstatus(); - val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; - write_tc_c0_tcstatus(val); - - write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); - - /* - * The sde-kit passes 'memsize' to __start in $a3, so set something - * here... 
Or set $a3 to zero and define DFLT_STACK_SIZE and - * DFLT_HEAP_SIZE when you compile your program - */ - mttgpr(6, v->ntcs); - mttgpr(7, physical_memsize); - - /* set up VPE1 */ - /* - * bind the TC to VPE 1 as late as possible so we only have the final - * VPE registers to set up, and so an EJTAG probe can trigger on it - */ - write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); - - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); - - back_to_back_c0_hazard(); - - /* Set up the XTC bit in vpeconf0 to point at our tc */ - write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) - | (t->index << VPECONF0_XTC_SHIFT)); - - back_to_back_c0_hazard(); - - /* enable this VPE */ - write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); - - /* clear out any left overs from a previous program */ - write_vpe_c0_status(0); - write_vpe_c0_cause(0); - - /* take system out of configuration state */ - clear_c0_mvpcontrol(MVPCONTROL_VPC); - - /* - * SMTC/SMVP kernels manage VPE enable independently, - * but uniprocessor kernels need to turn it on, even - * if that wasn't the pre-dvpe() state. - */ -#ifdef CONFIG_SMP - evpe(vpeflags); -#else - evpe(EVPE_ENABLE); -#endif - emt(dmt_flag); - local_irq_restore(flags); - - list_for_each_entry(n, &v->notify, list) - n->start(minor); - - return 0; -} - -static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, +static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs, unsigned int symindex, const char *strtab, struct module *mod) { @@ -804,16 +555,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); for (i = 1; i < n; i++) { - if (strcmp(strtab + sym[i].st_name, "__start") == 0) { + if (strcmp(strtab + sym[i].st_name, "__start") == 0) v->__start = sym[i].st_value; - } - if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) { + if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) v->shared_ptr = (void *)sym[i].st_value; - } } - if ( (v->__start == 0) || (v->shared_ptr == NULL)) + if ((v->__start == 0) || (v->shared_ptr == NULL)) return -1; return 0; @@ -824,14 +573,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, * contents of the program (p)buffer performing relocatations/etc, free's it * when finished. 
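 *
 * The up-front sanity check on the ELF header below boils down to:
 *
 *	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) ||
 *	    (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC) ||
 *	    !elf_check_arch(hdr) || hdr->e_shentsize != sizeof(*sechdrs))
 *		return -ENOEXEC;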
*/ -static int vpe_elfload(struct vpe * v) +static int vpe_elfload(struct vpe *v) { Elf_Ehdr *hdr; Elf_Shdr *sechdrs; long err = 0; char *secstrings, *strtab = NULL; unsigned int len, i, symindex = 0, strindex = 0, relocate = 0; - struct module mod; // so we can re-use the relocations code + struct module mod; /* so we can re-use the relocations code */ memset(&mod, 0, sizeof(struct module)); strcpy(mod.name, "VPE loader"); @@ -845,8 +594,7 @@ static int vpe_elfload(struct vpe * v) || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC) || !elf_check_arch(hdr) || hdr->e_shentsize != sizeof(*sechdrs)) { - printk(KERN_WARNING - "VPE loader: program wrong arch or weird elf version\n"); + pr_warn("VPE loader: program wrong arch or weird elf version\n"); return -ENOEXEC; } @@ -855,8 +603,7 @@ static int vpe_elfload(struct vpe * v) relocate = 1; if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { - printk(KERN_ERR "VPE loader: program length %u truncated\n", - len); + pr_err("VPE loader: program length %u truncated\n", len); return -ENOEXEC; } @@ -871,22 +618,24 @@ static int vpe_elfload(struct vpe * v) if (relocate) { for (i = 1; i < hdr->e_shnum; i++) { - if (sechdrs[i].sh_type != SHT_NOBITS - && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { - printk(KERN_ERR "VPE program length %u truncated\n", + if ((sechdrs[i].sh_type != SHT_NOBITS) && + (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) { + pr_err("VPE program length %u truncated\n", len); return -ENOEXEC; } /* Mark all sections sh_addr with their address in the temporary image. */ - sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; + sechdrs[i].sh_addr = (size_t) hdr + + sechdrs[i].sh_offset; /* Internal symbols and strings. */ if (sechdrs[i].sh_type == SHT_SYMTAB) { symindex = i; strindex = sechdrs[i].sh_link; - strtab = (char *)hdr + sechdrs[strindex].sh_offset; + strtab = (char *)hdr + + sechdrs[strindex].sh_offset; } } layout_sections(&mod, hdr, sechdrs, secstrings); @@ -913,8 +662,9 @@ static int vpe_elfload(struct vpe * v) /* Update sh_addr to point to copy in image. */ sechdrs[i].sh_addr = (unsigned long)dest; - printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n", - secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr); + pr_debug(" section sh_name %s sh_addr 0x%x\n", + secstrings + sechdrs[i].sh_name, + sechdrs[i].sh_addr); } /* Fix up syms, so that st_value is a pointer to location. 
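 *
 * Once every st_value is absolute, find_vpe_symbols() can read the
 * entry point and the shared area straight out of the symbol table:
 *
 *	if (strcmp(strtab + sym[i].st_name, "__start") == 0)
 *		v->__start = sym[i].st_value;
 *	if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
 *		v->shared_ptr = (void *)sym[i].st_value;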
*/ @@ -935,17 +685,18 @@ static int vpe_elfload(struct vpe * v) continue; if (sechdrs[i].sh_type == SHT_REL) - err = apply_relocations(sechdrs, strtab, symindex, i, - &mod); + err = apply_relocations(sechdrs, strtab, + symindex, i, &mod); else if (sechdrs[i].sh_type == SHT_RELA) - err = apply_relocate_add(sechdrs, strtab, symindex, i, - &mod); + err = apply_relocate_add(sechdrs, strtab, + symindex, i, &mod); if (err < 0) return err; } } else { - struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff); + struct elf_phdr *phdr = (struct elf_phdr *) + ((char *)hdr + hdr->e_phoff); for (i = 0; i < hdr->e_phnum; i++) { if (phdr->p_type == PT_LOAD) { @@ -963,11 +714,15 @@ static int vpe_elfload(struct vpe * v) if (sechdrs[i].sh_type == SHT_SYMTAB) { symindex = i; strindex = sechdrs[i].sh_link; - strtab = (char *)hdr + sechdrs[strindex].sh_offset; + strtab = (char *)hdr + + sechdrs[strindex].sh_offset; - /* mark the symtab's address for when we try to find the - magic symbols */ - sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; + /* + * mark symtab's address for when we try + * to find the magic symbols + */ + sechdrs[i].sh_addr = (size_t) hdr + + sechdrs[i].sh_offset; } } } @@ -978,53 +733,19 @@ static int vpe_elfload(struct vpe * v) if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { if (v->__start == 0) { - printk(KERN_WARNING "VPE loader: program does not contain " - "a __start symbol\n"); + pr_warn("VPE loader: program does not contain a __start symbol\n"); return -ENOEXEC; } if (v->shared_ptr == NULL) - printk(KERN_WARNING "VPE loader: " - "program does not contain vpe_shared symbol.\n" - " Unable to use AMVP (AP/SP) facilities.\n"); + pr_warn("VPE loader: program does not contain vpe_shared symbol.\n" + " Unable to use AMVP (AP/SP) facilities.\n"); } - printk(" elf loaded\n"); + pr_info(" elf loaded\n"); return 0; } -static void cleanup_tc(struct tc *tc) -{ - unsigned long flags; - unsigned int mtflags, vpflags; - int tmp; - - local_irq_save(flags); - mtflags = dmt(); - vpflags = dvpe(); - /* Put MVPE's into 'configuration state' */ - set_c0_mvpcontrol(MVPCONTROL_VPC); - - settc(tc->index); - tmp = read_tc_c0_tcstatus(); - - /* mark not allocated and not dynamically allocatable */ - tmp &= ~(TCSTATUS_A | TCSTATUS_DA); - tmp |= TCSTATUS_IXMT; /* interrupt exempt */ - write_tc_c0_tcstatus(tmp); - - write_tc_c0_tchalt(TCHALT_H); - mips_ihb(); - - /* bind it to anything other than VPE1 */ -// write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE - - clear_c0_mvpcontrol(MVPCONTROL_VPC); - evpe(vpflags); - emt(mtflags); - local_irq_restore(flags); -} - static int getcwd(char *buff, int size) { mm_segment_t old_fs; @@ -1044,52 +765,49 @@ static int getcwd(char *buff, int size) static int vpe_open(struct inode *inode, struct file *filp) { enum vpe_state state; - struct vpe_notifications *not; + struct vpe_notifications *notifier; struct vpe *v; int ret; - if (minor != iminor(inode)) { + if (VPE_MODULE_MINOR != iminor(inode)) { /* assume only 1 device at the moment. 
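		 * Re-opening while a program is resident is allowed: the
		 * state is swapped atomically and any previous program is
		 * torn down first, i.e.
		 *
		 *	state = xchg(&v->state, VPE_STATE_INUSE);
		 *	if (state != VPE_STATE_UNUSED)
		 *		...stop notifiers, release_progmem(),
		 *		   cleanup_tc()...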
*/ - pr_warning("VPE loader: only vpe1 is supported\n"); + pr_warn("VPE loader: only vpe1 is supported\n"); return -ENODEV; } - if ((v = get_vpe(tclimit)) == NULL) { - pr_warning("VPE loader: unable to get vpe\n"); + v = get_vpe(aprp_cpu_index()); + if (v == NULL) { + pr_warn("VPE loader: unable to get vpe\n"); return -ENODEV; } state = xchg(&v->state, VPE_STATE_INUSE); if (state != VPE_STATE_UNUSED) { - printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); + pr_debug("VPE loader: tc in use dumping regs\n"); - list_for_each_entry(not, &v->notify, list) { - not->stop(tclimit); - } + list_for_each_entry(notifier, &v->notify, list) + notifier->stop(aprp_cpu_index()); release_progmem(v->load_addr); - cleanup_tc(get_tc(tclimit)); + cleanup_tc(get_tc(aprp_cpu_index())); } /* this of-course trashes what was there before... */ v->pbuffer = vmalloc(P_SIZE); if (!v->pbuffer) { - pr_warning("VPE loader: unable to allocate memory\n"); + pr_warn("VPE loader: unable to allocate memory\n"); return -ENOMEM; } v->plen = P_SIZE; v->load_addr = NULL; v->len = 0; - v->uid = filp->f_cred->fsuid; - v->gid = filp->f_cred->fsgid; - v->cwd[0] = 0; ret = getcwd(v->cwd, VPE_PATH_MAX); if (ret < 0) - printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret); + pr_warn("VPE loader: open, getcwd returned %d\n", ret); v->shared_ptr = NULL; v->__start = 0; @@ -1103,20 +821,20 @@ static int vpe_release(struct inode *inode, struct file *filp) Elf_Ehdr *hdr; int ret = 0; - v = get_vpe(tclimit); + v = get_vpe(aprp_cpu_index()); if (v == NULL) return -ENODEV; hdr = (Elf_Ehdr *) v->pbuffer; if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) { - if (vpe_elfload(v) >= 0) { + if ((vpe_elfload(v) >= 0) && vpe_run) { vpe_run(v); } else { - printk(KERN_WARNING "VPE loader: ELF load failed.\n"); + pr_warn("VPE loader: ELF load failed.\n"); ret = -ENOEXEC; } } else { - printk(KERN_WARNING "VPE loader: only elf files are supported\n"); + pr_warn("VPE loader: only elf files are supported\n"); ret = -ENOEXEC; } @@ -1134,22 +852,22 @@ static int vpe_release(struct inode *inode, struct file *filp) return ret; } -static ssize_t vpe_write(struct file *file, const char __user * buffer, - size_t count, loff_t * ppos) +static ssize_t vpe_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) { size_t ret = count; struct vpe *v; - if (iminor(file_inode(file)) != minor) + if (iminor(file_inode(file)) != VPE_MODULE_MINOR) return -ENODEV; - v = get_vpe(tclimit); + v = get_vpe(aprp_cpu_index()); + if (v == NULL) return -ENODEV; if ((count + v->len) > v->plen) { - printk(KERN_WARNING - "VPE loader: elf size too big. Perhaps strip uneeded symbols\n"); + pr_warn("VPE loader: elf size too big. 
Perhaps strip unneeded symbols\n");
 		return -ENOMEM;
 	}
@@ -1161,7 +879,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
 	return ret;
 }
 
-static const struct file_operations vpe_fops = {
+const struct file_operations vpe_fops = {
 	.owner = THIS_MODULE,
 	.open = vpe_open,
 	.release = vpe_release,
@@ -1169,420 +887,40 @@
 	.llseek = noop_llseek,
 };
 
-/* module wrapper entry points */
-/* give me a vpe */
-vpe_handle vpe_alloc(void)
-{
-	int i;
-	struct vpe *v;
-
-	/* find a vpe */
-	for (i = 1; i < MAX_VPES; i++) {
-		if ((v = get_vpe(i)) != NULL) {
-			v->state = VPE_STATE_INUSE;
-			return v;
-		}
-	}
-	return NULL;
-}
-
-EXPORT_SYMBOL(vpe_alloc);
-
-/* start running from here */
-int vpe_start(vpe_handle vpe, unsigned long start)
-{
-	struct vpe *v = vpe;
-
-	v->__start = start;
-	return vpe_run(v);
-}
-
-EXPORT_SYMBOL(vpe_start);
-
-/* halt it for now */
-int vpe_stop(vpe_handle vpe)
-{
-	struct vpe *v = vpe;
-	struct tc *t;
-	unsigned int evpe_flags;
-
-	evpe_flags = dvpe();
-
-	if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
-
-		settc(t->index);
-		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
-	}
-
-	evpe(evpe_flags);
-
-	return 0;
-}
-
-EXPORT_SYMBOL(vpe_stop);
-
-/* I've done with it thank you */
-int vpe_free(vpe_handle vpe)
-{
-	struct vpe *v = vpe;
-	struct tc *t;
-	unsigned int evpe_flags;
-
-	if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
-		return -ENOEXEC;
-	}
-
-	evpe_flags = dvpe();
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	settc(t->index);
-	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
-
-	/* halt the TC */
-	write_tc_c0_tchalt(TCHALT_H);
-	mips_ihb();
-
-	/* mark the TC unallocated */
-	write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
-
-	v->state = VPE_STATE_UNUSED;
-
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-	evpe(evpe_flags);
-
-	return 0;
-}
-
-EXPORT_SYMBOL(vpe_free);
-
 void *vpe_get_shared(int index)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return NULL;
 
 	return v->shared_ptr;
 }
-
 EXPORT_SYMBOL(vpe_get_shared);
 
-int vpe_getuid(int index)
-{
-	struct vpe *v;
-
-	if ((v = get_vpe(index)) == NULL)
-		return -1;
-
-	return v->uid;
-}
-
-EXPORT_SYMBOL(vpe_getuid);
-
-int vpe_getgid(int index)
-{
-	struct vpe *v;
-
-	if ((v = get_vpe(index)) == NULL)
-		return -1;
-
-	return v->gid;
-}
-
-EXPORT_SYMBOL(vpe_getgid);
-
 int vpe_notify(int index, struct vpe_notifications *notify)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return -1;
 
 	list_add(&notify->list, &v->notify);
 	return 0;
 }
-
 EXPORT_SYMBOL(vpe_notify);
 
 char *vpe_getcwd(int index)
 {
-	struct vpe *v;
+	struct vpe *v = get_vpe(index);
 
-	if ((v = get_vpe(index)) == NULL)
+	if (v == NULL)
 		return NULL;
 
 	return v->cwd;
 }
-
 EXPORT_SYMBOL(vpe_getcwd);
 
-static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t len)
-{
-	struct vpe *vpe = get_vpe(tclimit);
-	struct vpe_notifications *not;
-
-	list_for_each_entry(not, &vpe->notify, list) {
-		not->stop(tclimit);
-	}
-
-	release_progmem(vpe->load_addr);
-	cleanup_tc(get_tc(tclimit));
-	vpe_stop(vpe);
-	vpe_free(vpe);
-
-	return len;
-}
-static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
-
-static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
-			 char *buf)
-{
-	struct vpe *vpe = get_vpe(tclimit);
-
-	return sprintf(buf, "%d\n",
vpe->ntcs); -} - -static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) -{ - struct vpe *vpe = get_vpe(tclimit); - unsigned long new; - char *endp; - - new = simple_strtoul(buf, &endp, 0); - if (endp == buf) - goto out_einval; - - if (new == 0 || new > (hw_tcs - tclimit)) - goto out_einval; - - vpe->ntcs = new; - - return len; - -out_einval: - return -EINVAL; -} -static DEVICE_ATTR_RW(ntcs); - -static struct attribute *vpe_attrs[] = { - &dev_attr_kill.attr, - &dev_attr_ntcs.attr, - NULL, -}; -ATTRIBUTE_GROUPS(vpe); - -static void vpe_device_release(struct device *cd) -{ - kfree(cd); -} - -struct class vpe_class = { - .name = "vpe", - .owner = THIS_MODULE, - .dev_release = vpe_device_release, - .dev_groups = vpe_groups, -}; - -struct device vpe_device; - -static int __init vpe_module_init(void) -{ - unsigned int mtflags, vpflags; - unsigned long flags, val; - struct vpe *v = NULL; - struct tc *t; - int tc, err; - - if (!cpu_has_mipsmt) { - printk("VPE loader: not a MIPS MT capable processor\n"); - return -ENODEV; - } - - if (vpelimit == 0) { - printk(KERN_WARNING "No VPEs reserved for AP/SP, not " - "initializing VPE loader.\nPass maxvpes=<n> argument as " - "kernel argument\n"); - - return -ENODEV; - } - - if (tclimit == 0) { - printk(KERN_WARNING "No TCs reserved for AP/SP, not " - "initializing VPE loader.\nPass maxtcs=<n> argument as " - "kernel argument\n"); - - return -ENODEV; - } - - major = register_chrdev(0, module_name, &vpe_fops); - if (major < 0) { - printk("VPE loader: unable to register character device\n"); - return major; - } - - err = class_register(&vpe_class); - if (err) { - printk(KERN_ERR "vpe_class registration failed\n"); - goto out_chrdev; - } - - device_initialize(&vpe_device); - vpe_device.class = &vpe_class, - vpe_device.parent = NULL, - dev_set_name(&vpe_device, "vpe1"); - vpe_device.devt = MKDEV(major, minor); - err = device_add(&vpe_device); - if (err) { - printk(KERN_ERR "Adding vpe_device failed\n"); - goto out_class; - } - - local_irq_save(flags); - mtflags = dmt(); - vpflags = dvpe(); - - /* Put MVPE's into 'configuration state' */ - set_c0_mvpcontrol(MVPCONTROL_VPC); - - /* dump_mtregs(); */ - - val = read_c0_mvpconf0(); - hw_tcs = (val & MVPCONF0_PTC) + 1; - hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - - for (tc = tclimit; tc < hw_tcs; tc++) { - /* - * Must re-enable multithreading temporarily or in case we - * reschedule send IPIs or similar we might hang. - */ - clear_c0_mvpcontrol(MVPCONTROL_VPC); - evpe(vpflags); - emt(mtflags); - local_irq_restore(flags); - t = alloc_tc(tc); - if (!t) { - err = -ENOMEM; - goto out; - } - - local_irq_save(flags); - mtflags = dmt(); - vpflags = dvpe(); - set_c0_mvpcontrol(MVPCONTROL_VPC); - - /* VPE's */ - if (tc < hw_tcs) { - settc(tc); - - if ((v = alloc_vpe(tc)) == NULL) { - printk(KERN_WARNING "VPE: unable to allocate VPE\n"); - - goto out_reenable; - } - - v->ntcs = hw_tcs - tclimit; - - /* add the tc to the list of this vpe's tc's. 
*/
-			list_add(&t->tc, &v->tc);
-
-			/* deactivate all but vpe0 */
-			if (tc >= tclimit) {
-				unsigned long tmp = read_vpe_c0_vpeconf0();
-
-				tmp &= ~VPECONF0_VPA;
-
-				/* master VPE */
-				tmp |= VPECONF0_MVP;
-				write_vpe_c0_vpeconf0(tmp);
-			}
-
-			/* disable multi-threading with TC's */
-			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
-
-			if (tc >= vpelimit) {
-				/*
-				 * Set config to be the same as vpe0,
-				 * particularly kseg0 coherency alg
-				 */
-				write_vpe_c0_config(read_c0_config());
-			}
-		}
-
-		/* TC's */
-		t->pvpe = v;	/* set the parent vpe */
-
-		if (tc >= tclimit) {
-			unsigned long tmp;
-
-			settc(tc);
-
-			/* Any TC that is bound to VPE0 gets left as is - in case
-			   we are running SMTC on VPE0. A TC that is bound to any
-			   other VPE gets bound to VPE0, ideally I'd like to make
-			   it homeless but it doesn't appear to let me bind a TC
-			   to a non-existent VPE. Which is perfectly reasonable.
-
-			   The (un)bound state is visible to an EJTAG probe so may
-			   notify GDB...
-			*/
-
-			if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
-				/* tc is bound >vpe0 */
-				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
-
-				t->pvpe = get_vpe(0);	/* set the parent vpe */
-			}
-
-			/* halt the TC */
-			write_tc_c0_tchalt(TCHALT_H);
-			mips_ihb();
-
-			tmp = read_tc_c0_tcstatus();
-
-			/* mark not activated and not dynamically allocatable */
-			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
-			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
-			write_tc_c0_tcstatus(tmp);
-		}
-	}
-
-out_reenable:
-	/* release config state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	evpe(vpflags);
-	emt(mtflags);
-	local_irq_restore(flags);
-
-	return 0;
-
-out_class:
-	class_unregister(&vpe_class);
-out_chrdev:
-	unregister_chrdev(major, module_name);
-
-out:
-	return err;
-}
-
-static void __exit vpe_module_exit(void)
-{
-	struct vpe *v, *n;
-
-	device_del(&vpe_device);
-	unregister_chrdev(major, module_name);
-
-	/* No locking needed here */
-	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
-		if (v->state != VPE_STATE_UNUSED)
-			release_vpe(v);
-	}
-}
-
 module_init(vpe_module_init);
 module_exit(vpe_module_exit);
 MODULE_DESCRIPTION("MIPS VPE Loader");
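
The user-space contract preserved across this refactor is small: write a
relocatable ELF image into /dev/vpe1, close the descriptor to trigger
vpe_elfload() and vpe_run(), and write anything into the sysfs "kill"
attribute to stop and free the VPE. A minimal loader sketch against that
interface (the "spapp" image name is illustrative; the sysfs path follows
from the "vpe" class and "vpe1" device name registered above):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	FILE *f;
	int elf, vpe;

	elf = open("spapp", O_RDONLY);		/* the SP program image */
	vpe = open("/dev/vpe1", O_WRONLY);	/* minor VPE_MODULE_MINOR */
	if (elf < 0 || vpe < 0)
		return 1;

	/* vpe_write() accumulates the whole image in v->pbuffer */
	while ((n = read(elf, buf, sizeof(buf))) > 0)
		if (write(vpe, buf, n) != n)
			return 1;

	/* vpe_release() validates the ELF, relocates it and starts VPE1 */
	close(vpe);
	close(elf);

	/* later: stop the program and free the VPE */
	f = fopen("/sys/class/vpe/vpe1/kill", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}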