Diffstat (limited to 'arch/mips/kernel'): 73 files changed, 1539 insertions, 1430 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 7c96282bff2e..95a1e674fd67 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -3,9 +3,9 @@ # Makefile for the Linux/MIPS kernel. # -extra-y := head.o vmlinux.lds +always-$(KBUILD_BUILTIN) := vmlinux.lds -obj-y += branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \ +obj-y += head.o branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \ process.o prom.o ptrace.o reset.o setup.o signal.o \ syscall.o time.o topology.o traps.o unaligned.o watch.o \ vdso.o cacheinfo.o @@ -58,16 +58,13 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o bmips_5xxx_init.o obj-$(CONFIG_MIPS_MT) += mips-mt.o obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o -obj-$(CONFIG_MIPS_CMP) += smp-cmp.o obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o obj-$(CONFIG_MIPS_CPS_NS16550) += cps-vec-ns16550.o obj-$(CONFIG_MIPS_SPRAM) += spram.o obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o -obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o -obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o obj-$(CONFIG_MIPS_MSC) += irq-msc01.o @@ -93,7 +90,7 @@ obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o obj-$(CONFIG_RELOCATABLE) += relocate.o -obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o +obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 04ca75278f02..1e29efcba46e 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -23,9 +23,16 @@ #include <linux/kvm_host.h> +void output_ptreg_defines(void); void output_ptreg_defines(void) { COMMENT("MIPS pt_regs offsets."); +#ifdef CONFIG_32BIT + OFFSET(PT_ARG4, pt_regs, args[4]); + OFFSET(PT_ARG5, pt_regs, args[5]); + OFFSET(PT_ARG6, pt_regs, args[6]); + OFFSET(PT_ARG7, pt_regs, args[7]); +#endif OFFSET(PT_R0, pt_regs, regs[0]); OFFSET(PT_R1, pt_regs, regs[1]); OFFSET(PT_R2, pt_regs, regs[2]); @@ -75,6 +82,7 @@ void output_ptreg_defines(void) BLANK(); } +void output_task_defines(void); void output_task_defines(void) { COMMENT("MIPS task_struct offsets."); @@ -89,6 +97,7 @@ void output_task_defines(void) BLANK(); } +void output_thread_info_defines(void); void output_thread_info_defines(void) { COMMENT("MIPS thread_info offsets."); @@ -98,6 +107,7 @@ void output_thread_info_defines(void) OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_REGS, thread_info, regs); + OFFSET(TI_SYSCALL, thread_info, syscall); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); @@ -105,6 +115,7 @@ void output_thread_info_defines(void) BLANK(); } +void output_thread_defines(void); void output_thread_defines(void) { COMMENT("MIPS specific thread_struct offsets."); @@ -133,6 +144,7 @@ void output_thread_defines(void) } #ifdef CONFIG_MIPS_FP_SUPPORT +void output_thread_fpu_defines(void); void output_thread_fpu_defines(void) { OFFSET(THREAD_FPU, task_struct, thread.fpu); @@ -176,6 +188,7 @@ void output_thread_fpu_defines(void) } #endif +void output_mm_defines(void); void output_mm_defines(void) { COMMENT("Size of struct page"); @@ -196,11 +209,6 @@ void output_mm_defines(void) #endif 
DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); - DEFINE(_PGD_ORDER, PGD_ORDER); -#ifndef __PAGETABLE_PMD_FOLDED - DEFINE(_PMD_ORDER, PMD_ORDER); -#endif - DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); @@ -215,6 +223,7 @@ void output_mm_defines(void) } #ifdef CONFIG_32BIT +void output_sc_defines(void); void output_sc_defines(void) { COMMENT("Linux sigcontext offsets."); @@ -237,6 +246,7 @@ void output_sc_defines(void) #endif #ifdef CONFIG_64BIT +void output_sc_defines(void); void output_sc_defines(void) { COMMENT("Linux sigcontext offsets."); @@ -250,6 +260,7 @@ void output_sc_defines(void) } #endif +void output_signal_defined(void); void output_signal_defined(void) { COMMENT("Linux signal numbers."); @@ -289,6 +300,7 @@ void output_signal_defined(void) } #ifdef CONFIG_CPU_CAVIUM_OCTEON +void output_octeon_cop2_state_defines(void); void output_octeon_cop2_state_defines(void) { COMMENT("Octeon specific octeon_cop2_state offsets."); @@ -311,12 +323,16 @@ void output_octeon_cop2_state_defines(void) OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3); OFFSET(THREAD_CP2, task_struct, thread.cp2); +#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \ + CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); +#endif BLANK(); } #endif #ifdef CONFIG_HIBERNATION +void output_pbe_defines(void); void output_pbe_defines(void) { COMMENT(" Linux struct pbe offsets. "); @@ -329,6 +345,7 @@ void output_pbe_defines(void) #endif #ifdef CONFIG_CPU_PM +void output_pm_defines(void); void output_pm_defines(void) { COMMENT(" PM offsets. "); @@ -343,6 +360,7 @@ void output_pm_defines(void) #endif #ifdef CONFIG_MIPS_FP_SUPPORT +void output_kvm_defines(void); void output_kvm_defines(void) { COMMENT(" KVM/MIPS Specific offsets. "); @@ -387,10 +405,14 @@ void output_kvm_defines(void) #endif #ifdef CONFIG_MIPS_CPS +void output_cps_defines(void); void output_cps_defines(void) { COMMENT(" MIPS CPS offsets. 
"); + OFFSET(CLUSTERBOOTCFG_CORECONFIG, cluster_boot_config, core_config); + DEFINE(CLUSTERBOOTCFG_SIZE, sizeof(struct cluster_boot_config)); + OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask); OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config); DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config)); diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index d39a2963b451..2a14dc4ee57e 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -103,7 +103,7 @@ void sb1480_clockevent_init(void) BUG_ON(cpu > 3); /* Only have 4 general purpose timers */ - sprintf(name, "bcm1480-counter-%d", cpu); + sprintf(name, "bcm1480-counter-%u", cpu); cd->name = name; cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index 9a47fbcd4638..de64d6bb7ba3 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c @@ -10,6 +10,7 @@ #include <linux/mc146818rtc.h> #include <linux/irq.h> +#include <asm/ds1287.h> #include <asm/time.h> int ds1287_timer_state(void) diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 32ec67c9ab67..5f6e9e2ebbdb 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -200,7 +200,7 @@ int c0_compare_int_usable(void) */ if (c0_compare_int_pending()) { cnt = read_c0_count(); - write_c0_compare(cnt); + write_c0_compare(cnt - 1); back_to_back_c0_hazard(); while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS)) if (!c0_compare_int_pending()) @@ -228,7 +228,7 @@ int c0_compare_int_usable(void) if (!c0_compare_int_pending()) return 0; cnt = read_c0_count(); - write_c0_compare(cnt); + write_c0_compare(cnt - 1); back_to_back_c0_hazard(); while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS)) if (!c0_compare_int_pending()) @@ -303,13 +303,6 @@ int r4k_clockevent_init(void) if (!c0_compare_int_usable()) return -ENXIO; - /* - * With vectored interrupts things are getting platform specific. - * get_c0_compare_int is a hook to allow a platform to return the - * interrupt number of its liking. - */ - irq = get_c0_compare_int(); - cd = &per_cpu(mips_clockevent_device, cpu); cd->name = "MIPS"; @@ -320,7 +313,6 @@ int r4k_clockevent_init(void) min_delta = calculate_min_delta(); cd->rating = 300; - cd->irq = irq; cd->cpumask = cpumask_of(cpu); cd->set_next_event = mips_next_event; cd->event_handler = mips_event_handler; @@ -332,6 +324,13 @@ int r4k_clockevent_init(void) cp0_timer_irq_installed = 1; + /* + * With vectored interrupts things are getting platform specific. + * get_c0_compare_int is a hook to allow a platform to return the + * interrupt number of its liking. 
+ */ + irq = get_c0_compare_int(); + if (request_irq(irq, c0_compare_interrupt, flags, "timer", c0_compare_interrupt)) pr_err("Failed to request irq %d (timer)\n", irq); diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index e974a4954df8..c371def2302d 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -102,3 +102,4 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, return old; } } +EXPORT_SYMBOL(__cmpxchg_small); diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 975343240148..2ae7034a3d5c 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -4,6 +4,7 @@ * Author: Paul Burton <paul.burton@mips.com> */ +#include <linux/init.h> #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asm-offsets.h> @@ -13,10 +14,15 @@ #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/pm.h> +#include <asm/smp-cps.h> #define GCR_CPC_BASE_OFS 0x0088 #define GCR_CL_COHERENCE_OFS 0x2008 #define GCR_CL_ID_OFS 0x2028 +#define CM3_GCR_Cx_ID_CLUSTER_SHF 8 +#define CM3_GCR_Cx_ID_CLUSTER_MSK (0xff << 8) +#define CM3_GCR_Cx_ID_CORENUM_SHF 0 +#define CM3_GCR_Cx_ID_CORENUM_MSK (0xff << 0) #define CPC_CL_VC_STOP_OFS 0x2020 #define CPC_CL_VC_RUN_OFS 0x2028 @@ -80,49 +86,16 @@ nop .endm - /* Calculate an uncached address for the CM GCRs */ - .macro cmgcrb dest - .set push - .set noat - MFC0 $1, CP0_CMGCRBASE - PTR_SLL $1, $1, 4 - PTR_LI \dest, UNCAC_BASE - PTR_ADDU \dest, \dest, $1 - .set pop - .endm - -.balign 0x1000 - -LEAF(mips_cps_core_entry) - /* - * These first 4 bytes will be patched by cps_smp_setup to load the - * CCA to use into register s0. - */ - .word 0 - - /* Check whether we're here due to an NMI */ - mfc0 k0, CP0_STATUS - and k0, k0, ST0_NMI - beqz k0, not_nmi - nop - - /* This is an NMI */ - PTR_LA k0, nmi_handler - jr k0 - nop - -not_nmi: - /* Setup Cause */ - li t0, CAUSEF_IV - mtc0 t0, CP0_CAUSE - /* Setup Status */ - li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS - mtc0 t0, CP0_STATUS +LEAF(mips_cps_core_boot) + /* Save CCA and GCR base */ + move s0, a0 + move s1, a1 + /* We don't know how to do coherence setup on earlier ISA */ +#if MIPS_ISA_REV > 0 /* Skip cache & coherence setup if we're already coherent */ - cmgcrb v1 - lw s7, GCR_CL_COHERENCE_OFS(v1) + lw s7, GCR_CL_COHERENCE_OFS(s1) bnez s7, 1f nop @@ -132,8 +105,9 @@ not_nmi: /* Enter the coherent domain */ li t0, 0xff - sw t0, GCR_CL_COHERENCE_OFS(v1) + sw t0, GCR_CL_COHERENCE_OFS(s1) ehb +#endif /* MIPS_ISA_REV > 0 */ /* Set Kseg0 CCA to that in s0 */ 1: mfc0 t0, CP0_CONFIG @@ -180,49 +154,45 @@ not_nmi: PTR_L sp, VPEBOOTCFG_SP(v1) jr t1 nop - END(mips_cps_core_entry) + END(mips_cps_core_boot) -.org 0x200 + __INIT LEAF(excep_tlbfill) DUMP_EXCEP("TLB Fill") b . nop END(excep_tlbfill) -.org 0x280 LEAF(excep_xtlbfill) DUMP_EXCEP("XTLB Fill") b . nop END(excep_xtlbfill) -.org 0x300 LEAF(excep_cache) DUMP_EXCEP("Cache") b . nop END(excep_cache) -.org 0x380 LEAF(excep_genex) DUMP_EXCEP("General") b . nop END(excep_genex) -.org 0x400 LEAF(excep_intex) DUMP_EXCEP("Interrupt") b . 
nop END(excep_intex) -.org 0x480 LEAF(excep_ejtag) PTR_LA k0, ejtag_debug_handler jr k0 nop END(excep_ejtag) + __FINIT LEAF(mips_cps_core_init) #ifdef CONFIG_MIPS_MT_SMP @@ -305,13 +275,21 @@ LEAF(mips_cps_core_init) */ LEAF(mips_cps_get_bootcfg) /* Calculate a pointer to this cores struct core_boot_config */ - cmgcrb t0 - lw t0, GCR_CL_ID_OFS(t0) + PTR_LA v0, mips_cps_cluster_bootcfg + PTR_L v0, 0(v0) + lw t0, GCR_CL_ID_OFS(s1) +#ifdef CONFIG_CPU_MIPSR6 + ext t1, t0, CM3_GCR_Cx_ID_CLUSTER_SHF, 8 + li t2, CLUSTERBOOTCFG_SIZE + mul t1, t1, t2 + PTR_ADDU \ + v0, v0, t1 +#endif + PTR_L v0, CLUSTERBOOTCFG_CORECONFIG(v0) + andi t0, t0, CM3_GCR_Cx_ID_CORENUM_MSK li t1, COREBOOTCFG_SIZE mul t0, t0, t1 - PTR_LA t1, mips_cps_core_bootcfg - PTR_L t1, 0(t1) - PTR_ADDU v0, t0, t1 + PTR_ADDU v0, v0, t0 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ li t9, 0 @@ -366,8 +344,9 @@ LEAF(mips_cps_boot_vpes) has_vp t0, 5f /* Find base address of CPC */ - cmgcrb t3 - PTR_L t1, GCR_CPC_BASE_OFS(t3) + PTR_LA t1, mips_gcr_base + PTR_L t1, 0(t1) + PTR_L t1, GCR_CPC_BASE_OFS(t1) PTR_LI t2, ~0x7fff and t1, t1, t2 PTR_LI t2, UNCAC_BASE @@ -430,7 +409,7 @@ LEAF(mips_cps_boot_vpes) /* Calculate a pointer to the VPEs struct vpe_boot_config */ li t0, VPEBOOTCFG_SIZE mul t0, t0, ta1 - addu t0, t0, ta3 + PTR_ADDU t0, t0, ta3 /* Set the TC restart PC */ lw t1, VPEBOOTCFG_PC(t0) @@ -520,6 +499,7 @@ LEAF(mips_cps_boot_vpes) nop END(mips_cps_boot_vpes) +#if MIPS_ISA_REV > 0 LEAF(mips_cps_cache_init) /* * Clear the bits used to index the caches. Note that the architecture @@ -593,6 +573,7 @@ dcache_done: jr ra nop END(mips_cps_cache_init) +#endif /* MIPS_ISA_REV > 0 */ #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) @@ -603,10 +584,10 @@ dcache_done: lw $1, TI_CPU(gp) sll $1, $1, LONGLOG PTR_LA \dest, __per_cpu_offset - addu $1, $1, \dest + PTR_ADDU $1, $1, \dest lw $1, 0($1) PTR_LA \dest, cps_cpu_state - addu \dest, \dest, $1 + PTR_ADDU \dest, \dest, $1 .set pop .endm diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index d510f628ee03..04dc9ab55524 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -9,6 +9,7 @@ */ #include <linux/init.h> #include <linux/kernel.h> +#include <linux/mmu_context.h> #include <linux/ptrace.h> #include <linux/smp.h> #include <linux/stddef.h> @@ -37,6 +38,8 @@ unsigned int elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); +static bool mmid_disabled_quirk; + static inline unsigned long cpu_get_msa_id(void) { unsigned long status, msa_id; @@ -179,7 +182,6 @@ void __init check_bugs32(void) static inline int cpu_has_confreg(void) { #ifdef CONFIG_CPU_R3000 - extern unsigned long r3k_cache_size(unsigned long); unsigned long size1, size2; unsigned long cfg = read_c0_conf(); @@ -646,7 +648,7 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); if (cpu_has_mips_r6) { - if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid) + if (!mmid_disabled_quirk && (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)) config5 |= MIPS_CONF5_MI; else config5 &= ~MIPS_CONF5_MI; @@ -709,7 +711,6 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) max_mmid_width); asid_mask = GENMASK(max_mmid_width - 1, 0); } - set_cpu_asid_mask(c, asid_mask); } } @@ -1115,46 +1116,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) MIPS_CPU_LLSC; c->tlbsize = 48; break; - case PRID_IMP_VR41XX: - set_isa(c, MIPS_CPU_ISA_III); - c->fpu_msk31 |= 
FPU_CSR_CONDX; - c->options = R4K_OPTS; - c->tlbsize = 32; - switch (c->processor_id & 0xf0) { - case PRID_REV_VR4111: - c->cputype = CPU_VR4111; - __cpu_name[cpu] = "NEC VR4111"; - break; - case PRID_REV_VR4121: - c->cputype = CPU_VR4121; - __cpu_name[cpu] = "NEC VR4121"; - break; - case PRID_REV_VR4122: - if ((c->processor_id & 0xf) < 0x3) { - c->cputype = CPU_VR4122; - __cpu_name[cpu] = "NEC VR4122"; - } else { - c->cputype = CPU_VR4181A; - __cpu_name[cpu] = "NEC VR4181A"; - } - break; - case PRID_REV_VR4130: - if ((c->processor_id & 0xf) < 0x4) { - c->cputype = CPU_VR4131; - __cpu_name[cpu] = "NEC VR4131"; - } else { - c->cputype = CPU_VR4133; - c->options |= MIPS_CPU_LLSC; - __cpu_name[cpu] = "NEC VR4133"; - } - break; - default: - printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); - c->cputype = CPU_VR41XX; - __cpu_name[cpu] = "NEC Vr41xx"; - break; - } - break; case PRID_IMP_R4300: c->cputype = CPU_R4300; __cpu_name[cpu] = "R4300"; @@ -1179,7 +1140,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) * This processor doesn't have an MMU, so it's not * "real easy" to run Linux on it. It is left purely * for documentation. Commented out because it shares - * it's c0_prid id number with the TX3900. + * its c0_prid id number with the TX3900. */ c->cputype = CPU_R4650; __cpu_name[cpu] = "R4650"; @@ -1542,6 +1503,10 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) break; } break; + case PRID_IMP_NETLOGIC_AU13XX: + c->cputype = CPU_ALCHEMY; + __cpu_name[cpu] = "Au1300"; + break; } } @@ -1642,6 +1607,8 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) { decode_configs(c); + /* Octeon has different cache interface */ + c->options &= ~MIPS_CPU_4K_CACHE; switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_CAVIUM_CN38XX: case PRID_IMP_CAVIUM_CN31XX: @@ -1711,7 +1678,10 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c) static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { + c->cputype = CPU_LOONGSON64; + /* All Loongson processors covered here define ExcCode 16 as GSExc. 
*/ + decode_configs(c); c->options |= MIPS_CPU_GSEXCEX; switch (c->processor_id & PRID_IMP_MASK) { @@ -1721,7 +1691,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) case PRID_REV_LOONGSON2K_R1_1: case PRID_REV_LOONGSON2K_R1_2: case PRID_REV_LOONGSON2K_R1_3: - c->cputype = CPU_LOONGSON64; __cpu_name[cpu] = "Loongson-2K"; set_elf_platform(cpu, "gs264e"); set_isa(c, MIPS_CPU_ISA_M64R2); @@ -1734,14 +1703,12 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) switch (c->processor_id & PRID_REV_MASK) { case PRID_REV_LOONGSON3A_R2_0: case PRID_REV_LOONGSON3A_R2_1: - c->cputype = CPU_LOONGSON64; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); break; case PRID_REV_LOONGSON3A_R3_0: case PRID_REV_LOONGSON3A_R3_1: - c->cputype = CPU_LOONGSON64; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); @@ -1759,20 +1726,21 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ + change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, + LOONGSON_CONF6_INTIMER); break; case PRID_IMP_LOONGSON_64G: - c->cputype = CPU_LOONGSON64; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); decode_cpucfg(c); + change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER, + LOONGSON_CONF6_INTIMER); break; default: panic("Unknown Loongson Processor ID!"); break; } - - decode_configs(c); } #else static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { } @@ -1901,6 +1869,7 @@ void cpu_probe(void) cpu_probe_mips(c, cpu); break; case PRID_COMP_ALCHEMY: + case PRID_COMP_NETLOGIC: cpu_probe_alchemy(c, cpu); break; case PRID_COMP_SIBYTE: @@ -2079,3 +2048,39 @@ void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe) cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP; cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF; } + +void cpu_disable_mmid(void) +{ + int i; + unsigned long asid_mask; + unsigned int cpu = smp_processor_id(); + struct cpuinfo_mips *c = ¤t_cpu_data; + unsigned int config4 = read_c0_config4(); + unsigned int config5 = read_c0_config5(); + + /* Setup the initial ASID mask based on config4 */ + asid_mask = MIPS_ENTRYHI_ASID; + if (config4 & MIPS_CONF4_AE) + asid_mask |= MIPS_ENTRYHI_ASIDX; + set_cpu_asid_mask(c, asid_mask); + + /* Disable MMID in the C0 and update cpuinfo_mips accordingly */ + config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); + config5 &= ~MIPS_CONF5_MI; + write_c0_config5(config5); + /* Ensure the write to config5 above takes effect */ + back_to_back_c0_hazard(); + c->options &= ~MIPS_CPU_MMID; + + /* Setup asid cache value cleared in per_cpu_trap_init() */ + cpu_data[cpu].asid_cache = asid_first_version(cpu); + + /* Reinit context for each CPU */ + for_each_possible_cpu(i) + set_cpu_context(i, &init_mm, 0); + + /* Ensure that now MMID will be seen as disable */ + mmid_disabled_quirk = true; + + pr_info("MMID support disabled due to hardware support issue\n"); +} diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c index be93469c0e0e..0c826f729f75 100644 --- a/arch/mips/kernel/cpu-r3k-probe.c +++ b/arch/mips/kernel/cpu-r3k-probe.c @@ -42,7 +42,6 @@ void __init check_bugs32(void) static 
inline int cpu_has_confreg(void) { #ifdef CONFIG_CPU_R3000 - extern unsigned long r3k_cache_size(unsigned long); unsigned long size1, size2; unsigned long cfg = read_c0_conf(); diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index edc4afc080fa..59eca397f297 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c @@ -66,6 +66,18 @@ static bool rdhwr_count_usable(void) return false; } +static inline __init bool count_can_be_sched_clock(void) +{ + if (IS_ENABLED(CONFIG_CPU_FREQ)) + return false; + + if (num_possible_cpus() > 1 && + !IS_ENABLED(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK)) + return false; + + return true; +} + #ifdef CONFIG_CPU_FREQ static bool __read_mostly r4k_clock_unstable; @@ -111,7 +123,8 @@ int __init init_r4k_clocksource(void) return -ENXIO; /* Calculate a somewhat reasonable rating value */ - clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; + clocksource_mips.rating = 200; + clocksource_mips.rating += clamp(mips_hpt_frequency / 10000000, 0, 99); /* * R2 onwards makes the count accessible to user mode so it can be used @@ -122,9 +135,8 @@ int __init init_r4k_clocksource(void) clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); -#ifndef CONFIG_CPU_FREQ - sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); -#endif + if (count_can_be_sched_clock()) + sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); return 0; } diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index 5582a4ca1e9e..f0e7fe85a42a 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -11,6 +11,7 @@ #include <asm/cpu-features.h> #include <asm/cpu-info.h> +#include <asm/fpu.h> #ifdef CONFIG_MIPS_FP_SUPPORT @@ -309,9 +310,18 @@ void mips_set_personality_nan(struct arch_elf_state *state) struct cpuinfo_mips *c = &boot_cpu_data; struct task_struct *t = current; + /* Do this early so t->thread.fpu.fcr31 won't be clobbered in case + * we are preempted before the lose_fpu(0) in start_thread. + */ + lose_fpu(0); + t->thread.fpu.fcr31 = c->fpu_csr31; switch (state->nan_2008) { case 0: + if (!(c->fpu_msk31 & FPU_CSR_NAN2008)) + t->thread.fpu.fcr31 &= ~FPU_CSR_NAN2008; + if (!(c->fpu_msk31 & FPU_CSR_ABS2008)) + t->thread.fpu.fcr31 &= ~FPU_CSR_ABS2008; break; case 1: if (!(c->fpu_msk31 & FPU_CSR_NAN2008)) diff --git a/arch/mips/kernel/fpu-probe.c b/arch/mips/kernel/fpu-probe.c index e689d6a83234..6bf3f19b1c33 100644 --- a/arch/mips/kernel/fpu-probe.c +++ b/arch/mips/kernel/fpu-probe.c @@ -144,7 +144,7 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c) * IEEE 754 conformance mode to use. Affects the NaN encoding and the * ABS.fmt/NEG.fmt execution mode. 
*/ -static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT; +static enum { STRICT, EMULATED, LEGACY, STD2008, RELAXED } ieee754 = STRICT; /* * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes @@ -160,6 +160,7 @@ static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) switch (ieee754) { case STRICT: + case EMULATED: if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | @@ -204,6 +205,10 @@ static void cpu_set_nan_2008(struct cpuinfo_mips *c) mips_use_nan_legacy = !cpu_has_nan_2008; mips_use_nan_2008 = !!cpu_has_nan_2008; break; + case EMULATED: + /* Pretend ABS2008/NAN2008 options are dynamic */ + c->fpu_msk31 &= ~(FPU_CSR_NAN2008 | FPU_CSR_ABS2008); + fallthrough; case RELAXED: mips_use_nan_legacy = true; mips_use_nan_2008 = true; @@ -226,6 +231,8 @@ static int __init ieee754_setup(char *s) return -1; else if (!strcmp(s, "strict")) ieee754 = STRICT; + else if (!strcmp(s, "emulated")) + ieee754 = EMULATED; else if (!strcmp(s, "legacy")) ieee754 = LEGACY; else if (!strcmp(s, "2008")) diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 8c401e42301c..f39e85fd58fa 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(void) #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ -unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long +static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) { unsigned long sp, ip, tmp; diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 3425df6019c0..08c0a01d9a29 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -104,48 +104,59 @@ handle_vcei: __FINIT - .align 5 /* 32 byte rollback region */ -LEAF(__r4k_wait) - .set push - .set noreorder - /* start of rollback region */ - LONG_L t0, TI_FLAGS($28) - nop - andi t0, _TIF_NEED_RESCHED - bnez t0, 1f - nop - nop - nop -#ifdef CONFIG_CPU_MICROMIPS - nop - nop - nop - nop -#endif + .section .cpuidle.text,"ax" + /* Align to 32 bytes for the maximum idle interrupt region size. */ + .align 5 +LEAF(r4k_wait) + /* Keep the ISA bit clear for calculations on local labels here. */ +0: .fill 0 + /* Start of idle interrupt region. */ + local_irq_enable + /* + * If an interrupt lands here, before going idle on the next + * instruction, we must *NOT* go idle since the interrupt could + * have set TIF_NEED_RESCHED or caused a timer to need resched. + * Fall through -- see skipover_handler below -- and have the + * idle loop take care of things. + */ +1: .fill 0 + /* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */ + .if 1b - 0b > 32 + .error "overlong idle interrupt region" + .elseif 1b - 0b > 8 + .align 4 + .endif +2: .fill 0 + .equ r4k_wait_idle_size, 2b - 0b + /* End of idle interrupt region; size has to be a power of 2. 
*/ .set MIPS_ISA_ARCH_LEVEL_RAW +r4k_wait_insn: wait - /* end of rollback region (the region size must be power of two) */ -1: +r4k_wait_exit: + .set mips0 + local_irq_disable jr ra - nop - .set pop - END(__r4k_wait) + END(r4k_wait) + .previous - .macro BUILD_ROLLBACK_PROLOGUE handler - FEXPORT(rollback_\handler) + .macro BUILD_SKIPOVER_PROLOGUE handler + FEXPORT(skipover_\handler) .set push .set noat MFC0 k0, CP0_EPC - PTR_LA k1, __r4k_wait - ori k0, 0x1f /* 32 byte rollback region */ - xori k0, 0x1f + /* Subtract/add 2 to let the ISA bit propagate through the mask. */ + PTR_LA k1, r4k_wait_insn - 2 + ori k0, r4k_wait_idle_size - 2 + .set noreorder bne k0, k1, \handler + PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2 + .set reorder MTC0 k0, CP0_EPC .set pop .endm .align 5 -BUILD_ROLLBACK_PROLOGUE handle_int +BUILD_SKIPOVER_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) .cfi_signal_frame #ifdef CONFIG_TRACE_IRQFLAGS @@ -265,25 +276,24 @@ NESTED(except_vec_ejtag_debug, 0, sp) * This prototype is copied to ebase + n*IntCtl.VS and patched * to invoke the handler */ -BUILD_ROLLBACK_PROLOGUE except_vec_vi +BUILD_SKIPOVER_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) SAVE_SOME docfi=1 SAVE_AT docfi=1 .set push .set noreorder PTR_LA v1, except_vec_vi_handler -FEXPORT(except_vec_vi_lui) - lui v0, 0 /* Patched */ jr v1 FEXPORT(except_vec_vi_ori) - ori v0, 0 /* Patched */ + ori v0, zero, 0 /* Offset in vi_handlers[] */ .set pop END(except_vec_vi) EXPORT(except_vec_vi_end) /* * Common Vectored Interrupt code - * Complete the register saves and invoke the handler which is passed in $v0 + * Complete the register saves and invoke the handler, $v0 holds + * offset into vi_handlers[] */ NESTED(except_vec_vi_handler, 0, sp) SAVE_TEMP @@ -331,6 +341,7 @@ NESTED(except_vec_vi_handler, 0, sp) /* Save task's sp on IRQ stack so that unwinding can follow it */ LONG_S s1, 0(sp) 2: + PTR_L v0, vi_handlers(v0) jalr v0 /* Restore sp */ @@ -480,7 +491,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set push /* gas fails to assemble cfc1 for some archs (octeon).*/ \ .set mips1 - SET_HARDFLOAT + .set hardfloat cfc1 a1, fcr31 .set pop .endm diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c index 8c083612df9d..96ac40d20c23 100644 --- a/arch/mips/kernel/gpio_txx9.c +++ b/arch/mips/kernel/gpio_txx9.c @@ -32,14 +32,16 @@ static void txx9_gpio_set_raw(unsigned int offset, int value) __raw_writel(val, &txx9_pioptr->dout); } -static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int txx9_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { unsigned long flags; spin_lock_irqsave(&txx9_gpio_lock, flags); txx9_gpio_set_raw(offset, value); mmiowb(); spin_unlock_irqrestore(&txx9_gpio_lock, flags); + + return 0; } static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset) diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index b825ed4476c7..d99ed58b7043 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -59,6 +59,8 @@ #endif .endm + __HEAD + #ifndef CONFIG_NO_EXCEPT_FILL /* * Reserved space for exception handlers. 
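
The genex.S change above replaces the old 32-byte rollback region with a "skipover" scheme: instead of rolling EPC back so TIF_NEED_RESCHED is re-tested before the wait, the interrupt prologue advances EPC past the wait whenever the interrupt landed inside the idle interrupt region, and the idle loop then re-checks need_resched itself (the idle.c hunks below drop their raw_local_irq_enable() calls to match). What follows is a rough, illustrative C rendering of that EPC check only; the real implementation is the BUILD_SKIPOVER_PROLOGUE assembly, the helper names and the size constant are stand-ins, and the microMIPS ISA-bit adjustment (-2/+2) done by the assembly is ignored here.

#include <stdbool.h>

/* Stand-ins for the genex.S symbols; the size is illustrative (a power of
 * two of at most 32 bytes, as the alignment in genex.S requires). */
#define R4K_WAIT_IDLE_SIZE	16UL
extern char r4k_wait_insn[], r4k_wait_exit[];

/* Did the interrupt hit inside the aligned idle interrupt region that
 * ends with the wait instruction? */
static bool epc_in_idle_region(unsigned long epc)
{
	unsigned long region = (unsigned long)r4k_wait_insn & ~(R4K_WAIT_IDLE_SIZE - 1);

	return epc - region < R4K_WAIT_IDLE_SIZE;
}

/* If it did, rewrite EPC so the interrupt returns past the wait; the idle
 * loop then sees TIF_NEED_RESCHED instead of the CPU going back to sleep. */
static unsigned long skipover_fixup(unsigned long epc)
{
	return epc_in_idle_region(epc) ? (unsigned long)r4k_wait_exit : epc;
}
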
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 53adcc1b2ed5..80e8a04a642e 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -33,13 +33,6 @@ static void __cpuidle r3081_wait(void) { unsigned long cfg = read_c0_conf(); write_c0_conf(cfg | R30XX_CONF_HALT); - raw_local_irq_enable(); -} - -void __cpuidle r4k_wait(void) -{ - raw_local_irq_enable(); - __r4k_wait(); } /* @@ -57,7 +50,6 @@ void __cpuidle r4k_wait_irqoff(void) " .set arch=r4000 \n" " wait \n" " .set pop \n"); - raw_local_irq_enable(); } /* @@ -77,7 +69,6 @@ static void __cpuidle rm7k_wait_irqoff(void) " wait \n" " mtc0 $1, $12 # stalls until W stage \n" " .set pop \n"); - raw_local_irq_enable(); } /* @@ -103,6 +94,8 @@ static void __cpuidle au1k_wait(void) " nop \n" " .set pop \n" : : "r" (au1k_wait), "r" (c0status)); + + raw_local_irq_disable(); } static int __initdata nowait; @@ -241,18 +234,16 @@ void __init check_wait(void) } } -void arch_cpu_idle(void) +__cpuidle void arch_cpu_idle(void) { if (cpu_wait) cpu_wait(); - else - raw_local_irq_enable(); } #ifdef CONFIG_CPU_IDLE -int mips_cpuidle_wait_enter(struct cpuidle_device *dev, - struct cpuidle_driver *drv, int index) +__cpuidle int mips_cpuidle_wait_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { arch_cpu_idle(); return index; diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 71a882c8c6eb..f7978d50a2ba 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -56,7 +56,7 @@ void arch_jump_label_transform(struct jump_entry *e, * The branch offset must fit in the instruction's 26 * bit field. */ - WARN_ON((offset >= BIT(25)) || + WARN_ON((offset >= (long)BIT(25)) || (offset < -(long)BIT(25))); insn.j_format.opcode = bc6_op; diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 316b27d0d2fb..dc39f5b3fb83 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -55,7 +55,7 @@ NOKPROBE_SYMBOL(insn_has_delayslot); * one; putting breakpoint on top of atomic ll/sc pair is bad idea; * so we need to prevent it and refuse kprobes insertion for such * instructions; cannot do much about breakpoint in the middle of - * ll/sc pair; it is upto user to avoid those places + * ll/sc pair; it is up to user to avoid those places */ static int insn_has_ll_or_sc(union mips_instruction insn) { diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 6b61be486303..a0c0a7a654e9 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -42,6 +42,7 @@ #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/mman.h> +#include <asm/syscalls.h> #ifdef __MIPSEB__ #define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL)) diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 432bfd3e7f22..4e3579bbd620 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -8,6 +8,7 @@ #include <linux/mm.h> #include <linux/delay.h> #include <linux/libfdt.h> +#include <linux/reboot.h> #include <asm/cacheflush.h> #include <asm/page.h> diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index cff52b283e03..fcec579f64e9 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S @@ -10,7 +10,7 @@ * Author: Wu Zhangjin <wuzhangjin@gmail.com> */ -#include <asm/export.h> +#include <linux/export.h> #include <asm/regdef.h> #include <asm/stackframe.h> #include <asm/ftrace.h> diff --git 
a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index b4f7d950c846..7c9c5dc38823 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -5,15 +5,18 @@ */ #include <linux/errno.h> +#include <linux/of.h> #include <linux/percpu.h> #include <linux/spinlock.h> #include <asm/mips-cps.h> +#include <asm/smp-cps.h> #include <asm/mipsregs.h> void __iomem *mips_gcr_base; void __iomem *mips_cm_l2sync_base; int mips_cm_is64; +bool mips_cm_is_l2_hci_broken; static char *cm2_tr[8] = { "mem", "gcr", "gic", "mmio", @@ -179,13 +182,18 @@ static char *cm3_causes[32] = { static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock); static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags); -phys_addr_t __mips_cm_phys_base(void) +phys_addr_t __weak mips_cm_phys_base(void) { - u32 config3 = read_c0_config3(); unsigned long cmgcr; /* Check the CMGCRBase register is implemented */ - if (!(config3 & MIPS_CONF3_CMGCR)) + if (!(read_c0_config() & MIPS_CONF_M)) + return 0; + + if (!(read_c0_config2() & MIPS_CONF_M)) + return 0; + + if (!(read_c0_config3() & MIPS_CONF3_CMGCR)) return 0; /* Read the address from CMGCRBase */ @@ -193,10 +201,7 @@ phys_addr_t __mips_cm_phys_base(void) return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32); } -phys_addr_t mips_cm_phys_base(void) - __attribute__((weak, alias("__mips_cm_phys_base"))); - -phys_addr_t __mips_cm_l2sync_phys_base(void) +phys_addr_t __weak mips_cm_l2sync_phys_base(void) { u32 base_reg; @@ -212,9 +217,6 @@ phys_addr_t __mips_cm_l2sync_phys_base(void) return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; } -phys_addr_t mips_cm_l2sync_phys_base(void) - __attribute__((weak, alias("__mips_cm_l2sync_phys_base"))); - static void mips_cm_probe_l2sync(void) { unsigned major_rev; @@ -238,6 +240,23 @@ static void mips_cm_probe_l2sync(void) mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE); } +void mips_cm_update_property(void) +{ + struct device_node *cm_node; + + cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm"); + if (!cm_node) + return; + pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken"); + mips_cm_is_l2_hci_broken = true; + + /* Disable MMID only if it was configured */ + if (cpu_has_mmid) + cpu_disable_mmid(); + + of_node_put(cm_node); +} + int mips_cm_probe(void) { phys_addr_t addr; @@ -309,7 +328,9 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core, FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp); if (cm_rev >= CM_REV_CM3_5) { - val |= CM_GCR_Cx_OTHER_CLUSTER_EN; + if (cluster != cpu_cluster(¤t_cpu_data)) + val |= CM_GCR_Cx_OTHER_CLUSTER_EN; + val |= CM_GCR_Cx_OTHER_GIC_EN; val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster); val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block); } else { @@ -513,3 +534,24 @@ void mips_cm_error_report(void) /* reprime cause register */ write_gcr_error_cause(cm_error); } + +unsigned int mips_cps_first_online_in_cluster(int *first_cpu) +{ + unsigned int local_cl = cpu_cluster(¤t_cpu_data); + struct cpumask *local_cl_mask; + + /* + * mips_cps_cluster_bootcfg is allocated in cps_prepare_cpus. If it is + * not yet done, then we are so early that only one CPU is running, so + * it is the first online CPU in the cluster. 
+ */ + if (IS_ENABLED(CONFIG_MIPS_CPS) && mips_cps_cluster_bootcfg) + local_cl_mask = &mips_cps_cluster_bootcfg[local_cl].cpumask; + else + return true; + + *first_cpu = cpumask_any_and_but(local_cl_mask, + cpu_online_mask, + smp_processor_id()); + return (*first_cpu >= nr_cpu_ids); +} diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 67e130d3f038..10172fc4f627 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -15,6 +15,7 @@ #include <linux/security.h> #include <linux/types.h> #include <linux/uaccess.h> +#include <asm/syscalls.h> /* * CPU mask used to set process affinity for MT VPEs/TCs with FPUs diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index d5f7362e8c24..2ef610650a9e 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -19,6 +19,7 @@ #include <asm/mipsmtregs.h> #include <asm/r4kcache.h> #include <asm/cacheflush.h> +#include <asm/mips_mt.h> int vpelimit; @@ -42,83 +43,6 @@ static int __init maxtcs(char *str) __setup("maxtcs=", maxtcs); -/* - * Dump new MIPS MT state for the core. Does not leave TCs halted. - * Takes an argument which taken to be a pre-call MVPControl value. - */ - -void mips_mt_regdump(unsigned long mvpctl) -{ - unsigned long flags; - unsigned long vpflags; - unsigned long mvpconf0; - int nvpe; - int ntc; - int i; - int tc; - unsigned long haltval; - unsigned long tcstatval; - - local_irq_save(flags); - vpflags = dvpe(); - printk("=== MIPS MT State Dump ===\n"); - printk("-- Global State --\n"); - printk(" MVPControl Passed: %08lx\n", mvpctl); - printk(" MVPControl Read: %08lx\n", vpflags); - printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0())); - nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; - printk("-- per-VPE State --\n"); - for (i = 0; i < nvpe; i++) { - for (tc = 0; tc < ntc; tc++) { - settc(tc); - if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { - printk(" VPE %d\n", i); - printk(" VPEControl : %08lx\n", - read_vpe_c0_vpecontrol()); - printk(" VPEConf0 : %08lx\n", - read_vpe_c0_vpeconf0()); - printk(" VPE%d.Status : %08lx\n", - i, read_vpe_c0_status()); - printk(" VPE%d.EPC : %08lx %pS\n", - i, read_vpe_c0_epc(), - (void *) read_vpe_c0_epc()); - printk(" VPE%d.Cause : %08lx\n", - i, read_vpe_c0_cause()); - printk(" VPE%d.Config7 : %08lx\n", - i, read_vpe_c0_config7()); - break; /* Next VPE */ - } - } - } - printk("-- per-TC State --\n"); - for (tc = 0; tc < ntc; tc++) { - settc(tc); - if (read_tc_c0_tcbind() == read_c0_tcbind()) { - /* Are we dumping ourself? 
*/ - haltval = 0; /* Then we're not halted, and mustn't be */ - tcstatval = flags; /* And pre-dump TCStatus is flags */ - printk(" TC %d (current TC with VPE EPC above)\n", tc); - } else { - haltval = read_tc_c0_tchalt(); - write_tc_c0_tchalt(1); - tcstatval = read_tc_c0_tcstatus(); - printk(" TC %d\n", tc); - } - printk(" TCStatus : %08lx\n", tcstatval); - printk(" TCBind : %08lx\n", read_tc_c0_tcbind()); - printk(" TCRestart : %08lx %pS\n", - read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart()); - printk(" TCHalt : %08lx\n", haltval); - printk(" TCContext : %08lx\n", read_tc_c0_tccontext()); - if (!haltval) - write_tc_c0_tchalt(0); - } - printk("===========================\n"); - evpe(vpflags); - local_irq_restore(flags); -} - static int mt_opt_rpsctl = -1; static int mt_opt_nblsu = -1; static int mt_opt_forceconfig7; @@ -198,9 +122,8 @@ void mips_mt_set_cpuoptions(void) unsigned long ectlval; unsigned long itcblkgrn; - /* ErrCtl register is known as "ecc" to Linux */ - ectlval = read_c0_ecc(); - write_c0_ecc(ectlval | (0x1 << 26)); + ectlval = read_c0_errctl(); + write_c0_errctl(ectlval | (0x1 << 26)); ehb(); #define INDEX_0 (0x80000000) #define INDEX_8 (0x80000008) @@ -221,26 +144,20 @@ void mips_mt_set_cpuoptions(void) ehb(); /* Write out to ITU with CACHE op */ cache_op(Index_Store_Tag_D, INDEX_0); - write_c0_ecc(ectlval); + write_c0_errctl(ectlval); ehb(); printk("Mapped %ld ITC cells starting at 0x%08x\n", ((itcblkgrn & 0x7fe00000) >> 20), itc_base); } } -struct class *mt_class; +const struct class mt_class = { + .name = "mt", +}; -static int __init mt_init(void) +static int __init mips_mt_init(void) { - struct class *mtc; - - mtc = class_create(THIS_MODULE, "mt"); - if (IS_ERR(mtc)) - return PTR_ERR(mtc); - - mt_class = mtc; - - return 0; + return class_register(&mt_class); } -subsys_initcall(mt_init); +subsys_initcall(mips_mt_init); diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 0c936cbf20c5..ba0f62d8eff5 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -13,15 +13,13 @@ #include <linux/elf.h> #include <linux/mm.h> #include <linux/numa.h> -#include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/jump_label.h> - -extern void jump_label_apply_nops(struct module *mod); +#include <asm/jump_label.h> struct mips_hi16 { struct mips_hi16 *next; @@ -32,15 +30,6 @@ struct mips_hi16 { static LIST_HEAD(dbe_list); static DEFINE_SPINLOCK(dbe_lock); -#ifdef MODULE_START -void *module_alloc(unsigned long size) -{ - return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, - GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, - __builtin_return_address(0)); -} -#endif - static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v) { *location = base + v; diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 896080b445c2..447a3ea14aa1 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -11,7 +11,6 @@ * written by Carsten Langgaard, carstenl@mips.com */ #include <asm/asm.h> -#include <asm/export.h> #include <asm/asm-offsets.h> #include <asm/mipsregs.h> #include <asm/regdef.h> @@ -428,7 +427,6 @@ done_restore: jr ra nop .space 30 * 4, 0 -octeon_mult_save_end: EXPORT(octeon_mult_save_end) END(octeon_mult_save) @@ -448,7 +446,6 @@ octeon_mult_save_end: sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ jr ra sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ -octeon_mult_save2_end: 
EXPORT(octeon_mult_save2_end) END(octeon_mult_save2) @@ -480,7 +477,6 @@ octeon_mult_save2_end: sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ jr ra sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ -octeon_mult_save3_end: EXPORT(octeon_mult_save3_end) END(octeon_mult_save3) .set pop @@ -498,7 +494,6 @@ octeon_mult_save3_end: jr ra nop .space 30 * 4, 0 -octeon_mult_restore_end: EXPORT(octeon_mult_restore_end) END(octeon_mult_restore) @@ -517,7 +512,6 @@ octeon_mult_restore_end: mtp1 v0 /* P1 */ jr ra mtp0 v1 /* P0 */ -octeon_mult_restore2_end: EXPORT(octeon_mult_restore2_end) END(octeon_mult_restore2) @@ -548,7 +542,6 @@ octeon_mult_restore2_end: .word 0x714b000b /* mtp2 $10, $11 restore P2 and P5 */ -octeon_mult_restore3_end: EXPORT(octeon_mult_restore3_end) END(octeon_mult_restore3) .set pop diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index c4d6b09136b1..196a070349b0 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -791,8 +791,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, if (!mipspmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, data, regs)) - mipsxx_pmu_disable_event(idx); + perf_event_overflow(event, data, regs); } diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index 9bf60d7d44d3..3de0e05e0511 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -18,6 +18,7 @@ #include <asm/mipsmtregs.h> #include <asm/pm.h> #include <asm/pm-cps.h> +#include <asm/regdef.h> #include <asm/smp-cps.h> #include <asm/uasm.h> @@ -56,10 +57,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); -/* - * Used to synchronize entry to deep idle states. Actually per-core rather - * than per-CPU. 
- */ +/* Used to synchronize entry to deep idle states */ static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); /* Saved CPU state across the CPS_PM_POWER_GATED state */ @@ -69,13 +67,6 @@ DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); static struct uasm_label labels[32]; static struct uasm_reloc relocs[32]; -enum mips_reg { - zero, at, v0, v1, a0, a1, a2, a3, - t0, t1, t2, t3, t4, t5, t6, t7, - s0, s1, s2, s3, s4, s5, s6, s7, - t8, t9, k0, k1, gp, sp, fp, ra, -}; - bool cps_pm_support_state(enum cps_pm_state state) { return test_bit(state, state_support); @@ -110,17 +101,20 @@ static void coupled_barrier(atomic_t *a, unsigned online) int cps_pm_enter_state(enum cps_pm_state state) { unsigned cpu = smp_processor_id(); + unsigned int cluster = cpu_cluster(¤t_cpu_data); unsigned core = cpu_core(¤t_cpu_data); unsigned online, left; cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); u32 *core_ready_count, *nc_core_ready_count; void *nc_addr; cps_nc_entry_fn entry; + struct cluster_boot_config *cluster_cfg; struct core_boot_config *core_cfg; struct vpe_boot_config *vpe_cfg; + atomic_t *barrier; /* Check that there is an entry function for this state */ - entry = per_cpu(nc_asm_enter, core)[state]; + entry = per_cpu(nc_asm_enter, cpu)[state]; if (!entry) return -EINVAL; @@ -144,7 +138,8 @@ int cps_pm_enter_state(enum cps_pm_state state) if (!mips_cps_smp_in_use()) return -EINVAL; - core_cfg = &mips_cps_core_bootcfg[core]; + cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; + core_cfg = &cluster_cfg->core_config[core]; vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(¤t_cpu_data)]; vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; vpe_cfg->gp = (unsigned long)current_thread_info(); @@ -156,7 +151,7 @@ int cps_pm_enter_state(enum cps_pm_state state) smp_mb__after_atomic(); /* Create a non-coherent mapping of the core ready_count */ - core_ready_count = per_cpu(ready_count, core); + core_ready_count = per_cpu(ready_count, cpu); nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), (unsigned long)core_ready_count); nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); @@ -164,7 +159,8 @@ int cps_pm_enter_state(enum cps_pm_state state) /* Ensure ready_count is zero-initialised before the assembly runs */ WRITE_ONCE(*nc_core_ready_count, 0); - coupled_barrier(&per_cpu(pm_barrier, core), online); + barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu])); + coupled_barrier(barrier, online); /* Run the generated entry code */ left = entry(online, nc_core_ready_count); @@ -203,13 +199,13 @@ static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, return; /* Load base address */ - UASM_i_LA(pp, t0, (long)CKSEG0); + UASM_i_LA(pp, GPR_T0, (long)CKSEG0); /* Calculate end address */ if (cache_size < 0x8000) - uasm_i_addiu(pp, t1, t0, cache_size); + uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size); else - UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); + UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size)); /* Start of cache op loop */ uasm_build_label(pl, *pp, lbl); @@ -217,19 +213,19 @@ static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, /* Generate the cache ops */ for (i = 0; i < unroll_lines; i++) { if (cpu_has_mips_r6) { - uasm_i_cache(pp, op, 0, t0); - uasm_i_addiu(pp, t0, t0, cache->linesz); + uasm_i_cache(pp, op, 0, GPR_T0); + uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz); } else { - uasm_i_cache(pp, op, i * cache->linesz, t0); + uasm_i_cache(pp, op, i * cache->linesz, GPR_T0); } } if (!cpu_has_mips_r6) /* Update the base address */ 
- uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); + uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz); /* Loop if we haven't reached the end address yet */ - uasm_il_bne(pp, pr, t0, t1, lbl); + uasm_il_bne(pp, pr, GPR_T0, GPR_T1, lbl); uasm_i_nop(pp); } @@ -275,25 +271,25 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, */ /* Preserve perf counter setup */ - uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ - uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_mfc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_mfc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ /* Setup perf counter to count FSB full pipeline stalls */ - uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); - uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_addiu(pp, GPR_T0, GPR_ZERO, (perf_event << 5) | 0xf); + uasm_i_mtc0(pp, GPR_T0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ uasm_i_ehb(pp); - uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_mtc0(pp, GPR_ZERO, 25, (perf_counter * 2) + 1); /* PerfCntN */ uasm_i_ehb(pp); /* Base address for loads */ - UASM_i_LA(pp, t0, (long)CKSEG0); + UASM_i_LA(pp, GPR_T0, (long)CKSEG0); /* Start of clear loop */ uasm_build_label(pl, *pp, lbl); /* Perform some loads to fill the FSB */ for (i = 0; i < num_loads; i++) - uasm_i_lw(pp, zero, i * line_size * line_stride, t0); + uasm_i_lw(pp, GPR_ZERO, i * line_size * line_stride, GPR_T0); /* * Invalidate the new D-cache entries so that the cache will need @@ -301,9 +297,9 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, */ for (i = 0; i < num_loads; i++) { uasm_i_cache(pp, Hit_Invalidate_D, - i * line_size * line_stride, t0); + i * line_size * line_stride, GPR_T0); uasm_i_cache(pp, Hit_Writeback_Inv_SD, - i * line_size * line_stride, t0); + i * line_size * line_stride, GPR_T0); } /* Barrier ensuring previous cache invalidates are complete */ @@ -311,16 +307,16 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, uasm_i_ehb(pp); /* Check whether the pipeline stalled due to the FSB being full */ - uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_mfc0(pp, GPR_T1, 25, (perf_counter * 2) + 1); /* PerfCntN */ /* Loop if it didn't */ - uasm_il_beqz(pp, pr, t1, lbl); + uasm_il_beqz(pp, pr, GPR_T1, lbl); uasm_i_nop(pp); /* Restore perf counter 1. The count may well now be wrong... 
*/ - uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ + uasm_i_mtc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ uasm_i_ehb(pp); - uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ + uasm_i_mtc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ uasm_i_ehb(pp); return 0; @@ -330,12 +326,12 @@ static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, struct uasm_reloc **pr, unsigned r_addr, int lbl) { - uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); + uasm_i_lui(pp, GPR_T0, uasm_rel_hi(0x80000000)); uasm_build_label(pl, *pp, lbl); - uasm_i_ll(pp, t1, 0, r_addr); - uasm_i_or(pp, t1, t1, t0); - uasm_i_sc(pp, t1, 0, r_addr); - uasm_il_beqz(pp, pr, t1, lbl); + uasm_i_ll(pp, GPR_T1, 0, r_addr); + uasm_i_or(pp, GPR_T1, GPR_T1, GPR_T0); + uasm_i_sc(pp, GPR_T1, 0, r_addr); + uasm_il_beqz(pp, pr, GPR_T1, lbl); uasm_i_nop(pp); } @@ -344,9 +340,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) struct uasm_label *l = labels; struct uasm_reloc *r = relocs; u32 *buf, *p; - const unsigned r_online = a0; - const unsigned r_nc_count = a1; - const unsigned r_pcohctl = t7; + const unsigned r_online = GPR_A0; + const unsigned r_nc_count = GPR_A1; + const unsigned r_pcohctl = GPR_T8; const unsigned max_instrs = 256; unsigned cpc_cmd; int err; @@ -383,8 +379,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * with the return address placed in v0 to avoid clobbering * the ra register before it is saved. */ - UASM_i_LA(&p, t0, (long)mips_cps_pm_save); - uasm_i_jalr(&p, v0, t0); + UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save); + uasm_i_jalr(&p, GPR_V0, GPR_T0); uasm_i_nop(&p); } @@ -399,11 +395,11 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) /* Increment ready_count */ uasm_i_sync(&p, __SYNC_mb); uasm_build_label(&l, p, lbl_incready); - uasm_i_ll(&p, t1, 0, r_nc_count); - uasm_i_addiu(&p, t2, t1, 1); - uasm_i_sc(&p, t2, 0, r_nc_count); - uasm_il_beqz(&p, &r, t2, lbl_incready); - uasm_i_addiu(&p, t1, t1, 1); + uasm_i_ll(&p, GPR_T1, 0, r_nc_count); + uasm_i_addiu(&p, GPR_T2, GPR_T1, 1); + uasm_i_sc(&p, GPR_T2, 0, r_nc_count); + uasm_il_beqz(&p, &r, GPR_T2, lbl_incready); + uasm_i_addiu(&p, GPR_T1, GPR_T1, 1); /* Barrier ensuring all CPUs see the updated r_nc_count value */ uasm_i_sync(&p, __SYNC_mb); @@ -412,7 +408,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * If this is the last VPE to become ready for non-coherence * then it should branch below. */ - uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); + uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence); uasm_i_nop(&p); if (state < CPS_PM_POWER_GATED) { @@ -422,13 +418,13 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * has been disabled before proceeding, which it will do * by polling for the top bit of ready_count being set. 
*/ - uasm_i_addiu(&p, t1, zero, -1); + uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1); uasm_build_label(&l, p, lbl_poll_cont); - uasm_i_lw(&p, t0, 0, r_nc_count); - uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); + uasm_i_lw(&p, GPR_T0, 0, r_nc_count); + uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont); uasm_i_ehb(&p); if (cpu_has_mipsmt) - uasm_i_yield(&p, zero, t1); + uasm_i_yield(&p, GPR_ZERO, GPR_T1); uasm_il_b(&p, &r, lbl_poll_cont); uasm_i_nop(&p); } else { @@ -438,16 +434,16 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) */ if (cpu_has_mipsmt) { /* Halt the VPE via C0 tchalt register */ - uasm_i_addiu(&p, t0, zero, TCHALT_H); - uasm_i_mtc0(&p, t0, 2, 4); + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H); + uasm_i_mtc0(&p, GPR_T0, 2, 4); } else if (cpu_has_vp) { /* Halt the VP via the CPC VP_STOP register */ unsigned int vpe_id; vpe_id = cpu_vpe_id(&cpu_data[cpu]); - uasm_i_addiu(&p, t0, zero, 1 << vpe_id); - UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop()); - uasm_i_sw(&p, t0, 0, t1); + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id); + UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop()); + uasm_i_sw(&p, GPR_T0, 0, GPR_T1); } else { BUG(); } @@ -482,9 +478,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * defined by the interAptiv & proAptiv SUMs as ensuring that the * operation resulting from the preceding store is complete. */ - uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu])); - uasm_i_sw(&p, t0, 0, r_pcohctl); - uasm_i_lw(&p, t0, 0, r_pcohctl); + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu])); + uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); /* Barrier to ensure write to coherence control is complete */ uasm_i_sync(&p, __SYNC_full); @@ -492,8 +488,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) } /* Disable coherence */ - uasm_i_sw(&p, zero, 0, r_pcohctl); - uasm_i_lw(&p, t0, 0, r_pcohctl); + uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl); + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); if (state >= CPS_PM_CLOCK_GATED) { err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], @@ -515,9 +511,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) } /* Issue the CPC command */ - UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); - uasm_i_addiu(&p, t1, zero, cpc_cmd); - uasm_i_sw(&p, t1, 0, t0); + UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd()); + uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd); + uasm_i_sw(&p, GPR_T1, 0, GPR_T0); if (state == CPS_PM_POWER_GATED) { /* If anything goes wrong just hang */ @@ -564,12 +560,12 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) * will run this. The first will actually re-enable coherence & the * rest will just be performing a rather unusual nop. */ - uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3 + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3 ? 
CM_GCR_Cx_COHERENCE_COHDOMAINEN : CM3_GCR_Cx_COHERENCE_COHEN); - uasm_i_sw(&p, t0, 0, r_pcohctl); - uasm_i_lw(&p, t0, 0, r_pcohctl); + uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); /* Barrier to ensure write to coherence control is complete */ uasm_i_sync(&p, __SYNC_full); @@ -579,11 +575,11 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) /* Decrement ready_count */ uasm_build_label(&l, p, lbl_decready); uasm_i_sync(&p, __SYNC_mb); - uasm_i_ll(&p, t1, 0, r_nc_count); - uasm_i_addiu(&p, t2, t1, -1); - uasm_i_sc(&p, t2, 0, r_nc_count); - uasm_il_beqz(&p, &r, t2, lbl_decready); - uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); + uasm_i_ll(&p, GPR_T1, 0, r_nc_count); + uasm_i_addiu(&p, GPR_T2, GPR_T1, -1); + uasm_i_sc(&p, GPR_T2, 0, r_nc_count); + uasm_il_beqz(&p, &r, GPR_T2, lbl_decready); + uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1); /* Barrier ensuring all CPUs see the updated r_nc_count value */ uasm_i_sync(&p, __SYNC_mb); @@ -612,7 +608,7 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) } /* The core is coherent, time to return to C code */ - uasm_i_jr(&p, ra); + uasm_i_jr(&p, GPR_RA); uasm_i_nop(&p); gen_done: @@ -635,12 +631,14 @@ out_err: static int cps_pm_online_cpu(unsigned int cpu) { - enum cps_pm_state state; - unsigned core = cpu_core(&cpu_data[cpu]); + unsigned int sibling, core; void *entry_fn, *core_rc; + enum cps_pm_state state; + + core = cpu_core(&cpu_data[cpu]); for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { - if (per_cpu(nc_asm_enter, core)[state]) + if (per_cpu(nc_asm_enter, cpu)[state]) continue; if (!test_bit(state, state_support)) continue; @@ -652,16 +650,19 @@ static int cps_pm_online_cpu(unsigned int cpu) clear_bit(state, state_support); } - per_cpu(nc_asm_enter, core)[state] = entry_fn; + for_each_cpu(sibling, &cpu_sibling_map[cpu]) + per_cpu(nc_asm_enter, sibling)[state] = entry_fn; } - if (!per_cpu(ready_count, core)) { + if (!per_cpu(ready_count, cpu)) { core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } - per_cpu(ready_count, core) = core_rc; + + for_each_cpu(sibling, &cpu_sibling_map[cpu]) + per_cpu(ready_count, sibling) = core_rc; } return 0; diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index bb43bf850314..8f0a0001540c 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -66,24 +66,23 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", cpu_data[n].udelay_val / (500000/HZ), (cpu_data[n].udelay_val / (5000/HZ)) % 100); - seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); + seq_printf(m, "wait instruction\t: %s\n", str_yes_no(cpu_wait)); seq_printf(m, "microsecond timers\t: %s\n", - cpu_has_counter ? "yes" : "no"); + str_yes_no(cpu_has_counter)); seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); seq_printf(m, "extra interrupt vector\t: %s\n", - cpu_has_divec ? "yes" : "no"); - seq_printf(m, "hardware watchpoint\t: %s", - cpu_has_watch ? "yes, " : "no\n"); + str_yes_no(cpu_has_divec)); + seq_printf(m, "hardware watchpoint\t: %s", str_yes_no(cpu_has_watch)); if (cpu_has_watch) { - seq_printf(m, "count: %d, address/irw mask: [", + seq_printf(m, ", count: %d, address/irw mask: [", cpu_data[n].watch_reg_count); for (i = 0; i < cpu_data[n].watch_reg_count; i++) seq_printf(m, "%s0x%04x", i ? 
", " : "", cpu_data[n].watch_reg_masks[i]); - seq_puts(m, "]\n"); + seq_puts(m, "]"); } - seq_puts(m, "isa\t\t\t:"); + seq_puts(m, "\nisa\t\t\t:"); if (cpu_has_mips_1) seq_puts(m, " mips1"); if (cpu_has_mips_2) @@ -155,7 +154,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_mmips) { seq_printf(m, "micromips kernel\t: %s\n", - (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no"); + str_yes_no(read_c0_config3() & MIPS_CONF3_ISA_OE)); } seq_puts(m, "Options implemented\t:"); @@ -311,7 +310,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) { unsigned long i = *pos; - return i < NR_CPUS ? (void *) (i + 1) : NULL; + return i < nr_cpu_ids ? (void *) (i + 1) : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 35b912bce429..02aa6a04a21d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -40,7 +40,7 @@ #include <asm/stacktrace.h> #ifdef CONFIG_HOTPLUG_CPU -void arch_cpu_idle_dead(void) +void __noreturn arch_cpu_idle_dead(void) { play_dead(); } @@ -121,6 +121,19 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) /* Put the stack after the struct pt_regs. */ childksp = (unsigned long) childregs; p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK; + + /* + * New tasks lose permission to use the fpu. This accelerates context + * switching for most programs since they don't use the fpu. + */ + clear_tsk_thread_flag(p, TIF_USEDFPU); + clear_tsk_thread_flag(p, TIF_USEDMSA); + clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE); + +#ifdef CONFIG_MIPS_MT_FPAFF + clear_tsk_thread_flag(p, TIF_FPUBOUND); +#endif /* CONFIG_MIPS_MT_FPAFF */ + if (unlikely(args->fn)) { /* kernel thread */ unsigned long status = p->thread.cp0_status; @@ -149,20 +162,8 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; - /* - * New tasks lose permission to use the fpu. This accelerates context - * switching for most programs since they don't use the fpu. - */ childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); - clear_tsk_thread_flag(p, TIF_USEDFPU); - clear_tsk_thread_flag(p, TIF_USEDMSA); - clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE); - -#ifdef CONFIG_MIPS_MT_FPAFF - clear_tsk_thread_flag(p, TIF_FPUBOUND); -#endif /* CONFIG_MIPS_MT_FPAFF */ - #ifdef CONFIG_MIPS_FP_SUPPORT atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE); #endif @@ -689,18 +690,20 @@ unsigned long mips_stack_top(void) } /* Space for the VDSO, data page & GIC user page */ - top -= PAGE_ALIGN(current->thread.abi->vdso->size); - top -= PAGE_SIZE; - top -= mips_gic_present() ? PAGE_SIZE : 0; + if (current->thread.abi) { + top -= PAGE_ALIGN(current->thread.abi->vdso->size); + top -= PAGE_SIZE; + top -= mips_gic_present() ? 
PAGE_SIZE : 0; + + /* Space to randomize the VDSO base */ + if (current->flags & PF_RANDOMIZE) + top -= VDSO_RANDOMIZE_SIZE; + } /* Space for cache colour alignment */ if (cpu_has_dc_aliases) top -= shm_align_mask + 1; - /* Space to randomize the VDSO base */ - if (current->flags & PF_RANDOMIZE) - top -= VDSO_RANDOMIZE_SIZE; - return top; } @@ -711,7 +714,7 @@ unsigned long mips_stack_top(void) unsigned long arch_align_stack(unsigned long sp) { if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() & ~PAGE_MASK; + sp -= get_random_u32_below(PAGE_SIZE); return sp & ALMASK; } @@ -750,9 +753,9 @@ static void raise_backtrace(cpumask_t *mask) } } -void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) { - nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace); + nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace); } int mips_get_process_fp_mode(struct task_struct *task) diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 7db6ff9aed7d..4fd6da0a06c3 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -26,8 +26,10 @@ __init void mips_set_machine_name(const char *name) if (name == NULL) return; - strlcpy(mips_machine_name, name, sizeof(mips_machine_name)); + strscpy(mips_machine_name, name, sizeof(mips_machine_name)); pr_info("MIPS: machine is %s\n", mips_get_machine_name()); + + dump_stack_set_arch_desc(name); } char *mips_get_machine_name(void) @@ -39,7 +41,7 @@ char *mips_get_machine_name(void) void __init __dt_setup_arch(void *bph) { - if (!early_init_dt_scan(bph)) + if (!early_init_dt_scan(bph, __pa(bph))) return; mips_set_machine_name(of_flat_dt_get_machine_name()); @@ -52,9 +54,9 @@ int __init __dt_register_buses(const char *bus0, const char *bus1) if (!of_have_populated_dt()) panic("device tree not present"); - strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible)); + strscpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible)); if (bus1) { - strlcpy(of_ids[1].compatible, bus1, + strscpy(of_ids[1].compatible, bus1, sizeof(of_ids[1].compatible)); } diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 567aec4abac0..3f4c94c88124 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -31,6 +31,7 @@ #include <linux/seccomp.h> #include <linux/ftrace.h> +#include <asm/branch.h> #include <asm/byteorder.h> #include <asm/cpu.h> #include <asm/cpu-info.h> @@ -48,6 +49,12 @@ #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> +unsigned long exception_ip(struct pt_regs *regs) +{ + return exception_epc(regs); +} +EXPORT_SYMBOL(exception_ip); + /* * Called by kernel/ptrace.c when detaching.. 
* @@ -531,10 +538,11 @@ static int fpr_set(struct task_struct *target, ptrace_setfcr31(target, fcr31); } - if (count > 0) - err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - fir_pos, - fir_pos + sizeof(u32)); + if (count > 0) { + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + fir_pos, fir_pos + sizeof(u32)); + return 0; + } return err; } @@ -914,58 +922,60 @@ static const struct pt_regs_offset regoffset_table[] = { */ int regs_query_register_offset(const char *name) { - const struct pt_regs_offset *roff; - for (roff = regoffset_table; roff->name != NULL; roff++) - if (!strcmp(roff->name, name)) - return roff->offset; - return -EINVAL; + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + + return -EINVAL; } #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) static const struct user_regset mips_regsets[] = { [REGSET_GPR] = { - .core_note_type = NT_PRSTATUS, + USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG, .size = sizeof(unsigned int), .align = sizeof(unsigned int), - .regset_get = gpr32_get, + .regset_get = gpr32_get, .set = gpr32_set, }, [REGSET_DSP] = { - .core_note_type = NT_MIPS_DSP, + USER_REGSET_NOTE_TYPE(MIPS_DSP), .n = NUM_DSP_REGS + 1, .size = sizeof(u32), .align = sizeof(u32), - .regset_get = dsp32_get, + .regset_get = dsp32_get, .set = dsp32_set, .active = dsp_active, }, #ifdef CONFIG_MIPS_FP_SUPPORT [REGSET_FPR] = { - .core_note_type = NT_PRFPREG, + USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), - .regset_get = fpr_get, + .regset_get = fpr_get, .set = fpr_set, }, [REGSET_FP_MODE] = { - .core_note_type = NT_MIPS_FP_MODE, + USER_REGSET_NOTE_TYPE(MIPS_FP_MODE), .n = 1, .size = sizeof(int), .align = sizeof(int), - .regset_get = fp_mode_get, + .regset_get = fp_mode_get, .set = fp_mode_set, }, #endif #ifdef CONFIG_CPU_HAS_MSA [REGSET_MSA] = { - .core_note_type = NT_MIPS_MSA, + USER_REGSET_NOTE_TYPE(MIPS_MSA), .n = NUM_FPU_REGS + 1, .size = 16, .align = 16, - .regset_get = msa_get, + .regset_get = msa_get, .set = msa_set, }, #endif @@ -985,47 +995,47 @@ static const struct user_regset_view user_mips_view = { static const struct user_regset mips64_regsets[] = { [REGSET_GPR] = { - .core_note_type = NT_PRSTATUS, + USER_REGSET_NOTE_TYPE(PRSTATUS), .n = ELF_NGREG, .size = sizeof(unsigned long), .align = sizeof(unsigned long), - .regset_get = gpr64_get, + .regset_get = gpr64_get, .set = gpr64_set, }, [REGSET_DSP] = { - .core_note_type = NT_MIPS_DSP, + USER_REGSET_NOTE_TYPE(MIPS_DSP), .n = NUM_DSP_REGS + 1, .size = sizeof(u64), .align = sizeof(u64), - .regset_get = dsp64_get, + .regset_get = dsp64_get, .set = dsp64_set, .active = dsp_active, }, #ifdef CONFIG_MIPS_FP_SUPPORT [REGSET_FP_MODE] = { - .core_note_type = NT_MIPS_FP_MODE, + USER_REGSET_NOTE_TYPE(MIPS_FP_MODE), .n = 1, .size = sizeof(int), .align = sizeof(int), - .regset_get = fp_mode_get, + .regset_get = fp_mode_get, .set = fp_mode_set, }, [REGSET_FPR] = { - .core_note_type = NT_PRFPREG, + USER_REGSET_NOTE_TYPE(PRFPREG), .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), - .regset_get = fpr_get, + .regset_get = fpr_get, .set = fpr_set, }, #endif #ifdef CONFIG_CPU_HAS_MSA [REGSET_MSA] = { - .core_note_type = NT_MIPS_MSA, + USER_REGSET_NOTE_TYPE(MIPS_MSA), .n = NUM_FPU_REGS + 1, .size = 16, .align = 16, - .regset_get = msa_get, + .regset_get = msa_get, .set = msa_set, }, #endif @@ -1309,51 +1319,32 @@ long 
arch_ptrace(struct task_struct *child, long request, * Notification of system call entry/exit * - triggered by current->work.syscall_trace */ -asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) +asmlinkage long syscall_trace_enter(struct pt_regs *regs) { user_exit(); - current_thread_info()->syscall = syscall; - if (test_thread_flag(TIF_SYSCALL_TRACE)) { if (ptrace_report_syscall_entry(regs)) return -1; - syscall = current_thread_info()->syscall; } -#ifdef CONFIG_SECCOMP - if (unlikely(test_thread_flag(TIF_SECCOMP))) { - int ret, i; - struct seccomp_data sd; - unsigned long args[6]; - - sd.nr = syscall; - sd.arch = syscall_get_arch(current); - syscall_get_arguments(current, regs, args); - for (i = 0; i < 6; i++) - sd.args[i] = args[i]; - sd.instruction_pointer = KSTK_EIP(current); - - ret = __secure_computing(&sd); - if (ret == -1) - return ret; - syscall = current_thread_info()->syscall; - } -#endif + if (secure_computing()) + return -1; if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->regs[2]); - audit_syscall_entry(syscall, regs->regs[4], regs->regs[5], + audit_syscall_entry(current_thread_info()->syscall, + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); /* * Negative syscall numbers are mistaken for rejected syscalls, but * won't have had the return value set appropriately, so we do so now. */ - if (syscall < 0) + if (current_thread_info()->syscall < 0) syscall_set_return_value(current, regs, -ENOSYS, 0); - return syscall; + return current_thread_info()->syscall; } /* @@ -1362,7 +1353,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) */ asmlinkage void syscall_trace_leave(struct pt_regs *regs) { - /* + /* * We may come here right after calling schedule_user() * or do_notify_resume(), in which case we can be in RCU * user mode. 
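The ptrace.c hunk above drops the syscall argument from syscall_trace_enter(): the entry code now stores the syscall number in thread_info (see the TI_SYSCALL stores added to the scall* files later in this diff), and seccomp handling is deferred to the generic secure_computing() helper. A rough sketch of the asm/syscall.h accessor this arrangement leans on, shown only to illustrate the flow and not taken from this diff, would simply return that saved value:

/* sketch; assumes the usual MIPS <asm/syscall.h> context */
static inline long syscall_get_nr(struct task_struct *task,
				  struct pt_regs *regs)
{
	/* filled in by the syscall entry path before tracing/seccomp run */
	return task_thread_info(task)->syscall;
}

Because the saved number also covers the indirect syscall(__NR_###) case (the entry assembly stores a0 when v0 equals __NR_O32_Linux), seccomp, audit and the negative-syscall return-value fixup all read one consistent value.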
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S index 2748c55820c2..c000b22e3fcd 100644 --- a/arch/mips/kernel/r2300_fpu.S +++ b/arch/mips/kernel/r2300_fpu.S @@ -11,10 +11,10 @@ * Further modifications to make this work: * Copyright (c) 1998 Harald Koerfgen */ +#include <linux/export.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/errno.h> -#include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> @@ -64,7 +64,7 @@ LEAF(_restore_fp) */ LEAF(_save_fp_context) .set push - SET_HARDFLOAT + .set hardfloat li v0, 0 # assume success cfc1 t1, fcr31 EX2(s.d $f0, 0(a0)) @@ -98,7 +98,7 @@ LEAF(_save_fp_context) */ LEAF(_restore_fp_context) .set push - SET_HARDFLOAT + .set hardfloat li v0, 0 # assume success EX(lw t0, (a1)) EX2(l.d $f0, 0(a0)) diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 71b1aafae1bb..48e63943e6f7 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -13,7 +13,6 @@ */ #include <asm/asm.h> #include <asm/cachectl.h> -#include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c index 6ffefb2c6971..1e300330078d 100644 --- a/arch/mips/kernel/r4k-bugs64.c +++ b/arch/mips/kernel/r4k-bugs64.c @@ -14,6 +14,7 @@ #include <asm/fpu.h> #include <asm/mipsregs.h> #include <asm/setup.h> +#include <asm/traps.h> static char bug64hit[] __initdata = "reliable operation impossible!\n%s"; diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 2e687c60bc4f..4bb97ee89904 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -12,10 +12,10 @@ * Copyright (C) 2000 MIPS Technologies, Inc. * Copyright (C) 1999, 2001 Silicon Graphics, Inc. 
*/ +#include <linux/export.h> #include <asm/asm.h> #include <asm/asmmacro.h> #include <asm/errno.h> -#include <asm/export.h> #include <asm/fpregdef.h> #include <asm/mipsregs.h> #include <asm/asm-offsets.h> @@ -26,7 +26,7 @@ .macro EX insn, reg, src .set push - SET_HARDFLOAT + .set hardfloat .set nomacro .ex\@: \insn \reg, \src .set pop @@ -98,14 +98,14 @@ LEAF(_init_msa_upper) */ LEAF(_save_fp_context) .set push - SET_HARDFLOAT + .set hardfloat cfc1 t1, fcr31 .set pop #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) .set push - SET_HARDFLOAT + .set hardfloat #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) .set mips32r2 .set fp=64 @@ -135,7 +135,7 @@ LEAF(_save_fp_context) #endif .set push - SET_HARDFLOAT + .set hardfloat /* Store the 16 even double precision registers */ EX sdc1 $f0, 0(a0) EX sdc1 $f2, 16(a0) @@ -173,7 +173,7 @@ LEAF(_restore_fp_context) #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) .set push - SET_HARDFLOAT + .set hardfloat #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) .set mips32r2 .set fp=64 @@ -201,7 +201,7 @@ LEAF(_restore_fp_context) 1: .set pop #endif .set push - SET_HARDFLOAT + .set hardfloat EX ldc1 $f0, 0(a0) EX ldc1 $f2, 16(a0) EX ldc1 $f4, 32(a0) diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c index 56b51de2dc51..7f1c136ad850 100644 --- a/arch/mips/kernel/relocate.c +++ b/arch/mips/kernel/relocate.c @@ -138,7 +138,7 @@ static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new, apply_r_mips_hi16_rel(loc_orig, loc_new, offset); break; default: - pr_err("Unhandled relocation type %d at 0x%pK\n", type, + pr_err("Unhandled relocation type %d at 0x%p\n", type, loc_orig); return -ENOEXEC; } @@ -337,10 +337,10 @@ void *__init relocate_kernel(void) #if defined(CONFIG_USE_OF) /* Deal with the device tree */ fdt = plat_get_fdt(); - early_init_dt_scan(fdt); + early_init_dt_scan(fdt, __pa(fdt)); if (boot_command_line[0]) { /* Boot command line was passed in device tree */ - strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); + strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); } #endif /* CONFIG_USE_OF */ @@ -380,7 +380,7 @@ void *__init relocate_kernel(void) } #endif /* CONFIG_USE_OF */ - /* Copy the kernel to it's new location */ + /* Copy the kernel to its new location */ memcpy(loc_new, &_text, kernel_length); /* Perform relocations on the new kernel */ @@ -439,10 +439,10 @@ static void show_kernel_relocation(const char *level) { if (__kaslr_offset > 0) { printk(level); - pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset); - pr_cont(" .text @ 0x%pK\n", _text); - pr_cont(" .data @ 0x%pK\n", _sdata); - pr_cont(" .bss @ 0x%pK\n", __bss_start); + pr_cont("Kernel relocated by 0x%p\n", (void *)__kaslr_offset); + pr_cont(" .text @ 0x%p\n", _text); + pr_cont(" .data @ 0x%p\n", _sdata); + pr_cont(" .bss @ 0x%p\n", __bss_start); } } diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index cfde14b48fd8..de894a0211d7 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -66,12 +66,11 @@ copy_word: LONG_ADDIU s6, s6, -1 beq s6, zero, process_entry b copy_word - b process_entry done: #ifdef CONFIG_SMP /* kexec_flag reset is signal to other CPUs what kernel - was moved to it's location. Note - we need relocated address + was moved to its location. 
Note - we need relocated address of kexec_flag. */ bal 1f @@ -145,8 +144,7 @@ LEAF(kexec_smp_wait) * kexec_args[0..3] are used to prepare register values. */ -kexec_args: - EXPORT(kexec_args) +EXPORT(kexec_args) arg0: PTR_WD 0x0 arg1: PTR_WD 0x0 arg2: PTR_WD 0x0 @@ -159,8 +157,7 @@ arg3: PTR_WD 0x0 * their registers a0-a3. secondary_kexec_args[0..3] are used * to prepare register values. */ -secondary_kexec_args: - EXPORT(secondary_kexec_args) +EXPORT(secondary_kexec_args) s_arg0: PTR_WD 0x0 s_arg1: PTR_WD 0x0 s_arg2: PTR_WD 0x0 @@ -171,19 +168,16 @@ kexec_flag: #endif -kexec_start_address: - EXPORT(kexec_start_address) +EXPORT(kexec_start_address) PTR_WD 0x0 .size kexec_start_address, PTRSIZE -kexec_indirection_page: - EXPORT(kexec_indirection_page) +EXPORT(kexec_indirection_page) PTR_WD 0 .size kexec_indirection_page, PTRSIZE relocate_new_kernel_end: -relocate_new_kernel_size: - EXPORT(relocate_new_kernel_size) +EXPORT(relocate_new_kernel_size) PTR_WD relocate_new_kernel_end - relocate_new_kernel .size relocate_new_kernel_size, PTRSIZE diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c deleted file mode 100644 index d26dcc4b46e7..000000000000 --- a/arch/mips/kernel/rtlx-cmp.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. - * Copyright (C) 2013 Imagination Technologies Ltd. - */ -#include <linux/device.h> -#include <linux/fs.h> -#include <linux/err.h> -#include <linux/wait.h> -#include <linux/sched.h> -#include <linux/smp.h> - -#include <asm/mips_mt.h> -#include <asm/vpe.h> -#include <asm/rtlx.h> - -static int major; - -static void rtlx_interrupt(void) -{ - int i; - struct rtlx_info *info; - struct rtlx_info **p = vpe_get_shared(aprp_cpu_index()); - - if (p == NULL || *p == NULL) - return; - - info = *p; - - if (info->ap_int_pending == 1 && smp_processor_id() == 0) { - for (i = 0; i < RTLX_CHANNELS; i++) { - wake_up(&channel_wqs[i].lx_queue); - wake_up(&channel_wqs[i].rt_queue); - } - info->ap_int_pending = 0; - } -} - -void _interrupt_sp(void) -{ - smp_send_reschedule(aprp_cpu_index()); -} - -int __init rtlx_module_init(void) -{ - struct device *dev; - int i, err; - - if (!cpu_has_mipsmt) { - pr_warn("VPE loader: not a MIPS MT capable processor\n"); - return -ENODEV; - } - - if (num_possible_cpus() - aprp_cpu_index() < 1) { - pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n" - "Pass maxcpus=<n> argument as kernel argument\n"); - - return -ENODEV; - } - - major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops); - if (major < 0) { - pr_err("rtlx_module_init: unable to register device\n"); - return major; - } - - /* initialise the wait queues */ - for (i = 0; i < RTLX_CHANNELS; i++) { - init_waitqueue_head(&channel_wqs[i].rt_queue); - init_waitqueue_head(&channel_wqs[i].lx_queue); - atomic_set(&channel_wqs[i].in_open, 0); - mutex_init(&channel_wqs[i].mutex); - - dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, - "%s%d", RTLX_MODULE_NAME, i); - if (IS_ERR(dev)) { - while (i--) - device_destroy(mt_class, MKDEV(major, i)); - - err = PTR_ERR(dev); - goto out_chrdev; - } - } - - /* set up notifiers */ - rtlx_notify.start = rtlx_starting; - rtlx_notify.stop = rtlx_stopping; - vpe_notify(aprp_cpu_index(), &rtlx_notify); - - if (cpu_has_vint) { - aprp_hook = rtlx_interrupt; - } else { - pr_err("APRP RTLX init on 
non-vectored-interrupt processor\n"); - err = -ENODEV; - goto out_class; - } - - return 0; - -out_class: - for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); -out_chrdev: - unregister_chrdev(major, RTLX_MODULE_NAME); - - return err; -} - -void __exit rtlx_module_exit(void) -{ - int i; - - for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); - - unregister_chrdev(major, RTLX_MODULE_NAME); - - aprp_hook = NULL; -} diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c index 38c6925a1bea..ff7535de42ca 100644 --- a/arch/mips/kernel/rtlx-mt.c +++ b/arch/mips/kernel/rtlx-mt.c @@ -95,11 +95,11 @@ int __init rtlx_module_init(void) atomic_set(&channel_wqs[i].in_open, 0); mutex_init(&channel_wqs[i].mutex); - dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, + dev = device_create(&mt_class, NULL, MKDEV(major, i), NULL, "%s%d", RTLX_MODULE_NAME, i); if (IS_ERR(dev)) { while (i--) - device_destroy(mt_class, MKDEV(major, i)); + device_destroy(&mt_class, MKDEV(major, i)); err = PTR_ERR(dev); goto out_chrdev; @@ -127,7 +127,7 @@ int __init rtlx_module_init(void) out_class: for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); + device_destroy(&mt_class, MKDEV(major, i)); out_chrdev: unregister_chrdev(major, RTLX_MODULE_NAME); @@ -139,7 +139,7 @@ void __exit rtlx_module_exit(void) int i; for (i = 0; i < RTLX_CHANNELS; i++) - device_destroy(mt_class, MKDEV(major, i)); + device_destroy(&mt_class, MKDEV(major, i)); unregister_chrdev(major, RTLX_MODULE_NAME); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 18dc9b345056..4947a4f39e37 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -64,10 +64,10 @@ load_a6: user_lw(t7, 24(t0)) # argument #7 from usp load_a7: user_lw(t8, 28(t0)) # argument #8 from usp loads_done: - sw t5, 16(sp) # argument #5 to ksp - sw t6, 20(sp) # argument #6 to ksp - sw t7, 24(sp) # argument #7 to ksp - sw t8, 28(sp) # argument #8 to ksp + sw t5, PT_ARG4(sp) # argument #5 to ksp + sw t6, PT_ARG5(sp) # argument #6 to ksp + sw t7, PT_ARG6(sp) # argument #7 to ksp + sw t8, PT_ARG7(sp) # argument #8 to ksp .set pop .section __ex_table,"a" @@ -77,6 +77,18 @@ loads_done: PTR_WD load_a7, bad_stack_a7 .previous + /* + * syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + */ + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ + LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number + b 2f +1: + LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number +2: + lw t0, TI_FLAGS($28) # syscall tracing enabled? li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 @@ -114,16 +126,7 @@ syscall_trace_entry: SAVE_STATIC move a0, sp - /* - * syscall number is in v0 unless we called syscall(__NR_###) - * where the real syscall number is in a0 - */ - move a1, v0 - subu t2, v0, __NR_O32_Linux - bnez t2, 1f /* __NR_syscall at offset 0 */ - lw a1, PT_R4(sp) - -1: jal syscall_trace_enter + jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 97456b2ca7dc..97788859238c 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting + LONG_S v0, TI_SYSCALL($28) # Store syscall number + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? 
and t0, t1, t0 @@ -72,7 +74,6 @@ syscall_common: n32_syscall_trace_entry: SAVE_STATIC move a0, sp - move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index e6264aa62e45..be11ea5cc67e 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting + LONG_S v0, TI_SYSCALL($28) # Store syscall number + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -82,7 +84,6 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC move a0, sp - move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index d3c2616cba22..7a5abb73e531 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -79,6 +79,22 @@ loads_done: PTR_WD load_a7, bad_stack_a7 .previous + /* + * absolute syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + * note: NR_syscall is the first O32 syscall but the macro is + * only defined when compiling with -mabi=32 (CONFIG_32BIT) + * therefore __NR_O32_Linux is used (4000) + */ + + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ + LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number + b 2f +1: + LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number +2: + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -113,22 +129,7 @@ trace_a_syscall: sd a7, PT_R11(sp) # For indirect syscalls move a0, sp - /* - * absolute syscall number is in v0 unless we called syscall(__NR_###) - * where the real syscall number is in a0 - * note: NR_syscall is the first O32 syscall but the macro is - * only defined when compiling with -mabi=32 (CONFIG_32BIT) - * therefore __NR_O32_Linux is used (4000) - */ - .set push - .set reorder - subu t1, v0, __NR_O32_Linux - move a1, v0 - bnez t1, 1f /* __NR_syscall at offset 0 */ - ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ - .set pop - -1: jal syscall_trace_enter + jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c index 0a9bd7b0983b..24560501c70d 100644 --- a/arch/mips/kernel/segment.c +++ b/arch/mips/kernel/segment.c @@ -46,7 +46,7 @@ static void build_segment_config(char *str, unsigned int cfg) ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT)); } -static int show_segments(struct seq_file *m, void *v) +static int segments_show(struct seq_file *m, void *v) { unsigned int segcfg; char str[42]; @@ -80,18 +80,7 @@ static int show_segments(struct seq_file *m, void *v) return 0; } - -static int segments_open(struct inode *inode, struct file *file) -{ - return single_open(file, show_segments, NULL); -} - -static const struct file_operations segments_fops = { - .open = segments_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(segments); static int __init segments_info(void) { diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 2ca156a5b231..11b9b6b63e19 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -11,9 +11,10 @@ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. 
Rozycki */ #include <linux/init.h> +#include <linux/cpu.h> +#include <linux/delay.h> #include <linux/ioport.h> #include <linux/export.h> -#include <linux/screen_info.h> #include <linux/memblock.h> #include <linux/initrd.h> #include <linux/root_dev.h> @@ -41,7 +42,9 @@ #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp-ops.h> +#include <asm/mips-cps.h> #include <asm/prom.h> +#include <asm/fw/fw.h> #ifdef CONFIG_MIPS_ELF_APPENDED_DTB char __section(".appended_dtb") __appended_dtb[0x100000]; @@ -51,10 +54,6 @@ struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_data); -#ifdef CONFIG_VT -struct screen_info screen_info; -#endif - /* * Setup information * @@ -148,7 +147,7 @@ static unsigned long __init init_initrd(void) /* * Board specific code or command line parser should have * already set up initrd_start and initrd_end. In these cases - * perfom sanity checks and use them if all looks good. + * perform sanity checks and use them if all looks good. */ if (!initrd_start || initrd_end <= initrd_start) goto disable; @@ -157,10 +156,6 @@ static unsigned long __init init_initrd(void) pr_err("initrd start must be page aligned\n"); goto disable; } - if (initrd_start < PAGE_OFFSET) { - pr_err("initrd start < PAGE_OFFSET\n"); - goto disable; - } /* * Sanitize initrd addresses. For example firmware @@ -173,6 +168,11 @@ static unsigned long __init init_initrd(void) initrd_end = (unsigned long)__va(end); initrd_start = (unsigned long)__va(__pa(initrd_start)); + if (initrd_start < PAGE_OFFSET) { + pr_err("initrd start < PAGE_OFFSET\n"); + goto disable; + } + ROOT_DEV = Root_RAM0; return PFN_UP(end); disable: @@ -322,11 +322,11 @@ static void __init bootmem_init(void) panic("Incorrect memory mapping !!!"); if (max_pfn > PFN_DOWN(HIGHMEM_START)) { + max_low_pfn = PFN_DOWN(HIGHMEM_START); #ifdef CONFIG_HIGHMEM - highstart_pfn = PFN_DOWN(HIGHMEM_START); + highstart_pfn = max_low_pfn; highend_pfn = max_pfn; #else - max_low_pfn = PFN_DOWN(HIGHMEM_START); max_pfn = max_low_pfn; #endif } @@ -442,8 +442,6 @@ static void __init mips_reserve_vmcore(void) #endif } -#ifdef CONFIG_KEXEC - /* 64M alignment for crash kernel regions */ #define CRASH_ALIGN SZ_64M #define CRASH_ADDR_MAX SZ_512M @@ -454,9 +452,13 @@ static void __init mips_parse_crashkernel(void) unsigned long long crash_size, crash_base; int ret; + if (!IS_ENABLED(CONFIG_CRASH_RESERVE)) + return; + total_mem = memblock_phys_mem_size(); ret = parse_crashkernel(boot_command_line, total_mem, - &crash_size, &crash_base); + &crash_size, &crash_base, + NULL, NULL, NULL); if (ret != 0 || crash_size <= 0) return; @@ -488,6 +490,9 @@ static void __init request_crashkernel(struct resource *res) { int ret; + if (!IS_ENABLED(CONFIG_CRASH_RESERVE)) + return; + if (crashk_res.start == crashk_res.end) return; @@ -497,15 +502,6 @@ static void __init request_crashkernel(struct resource *res) (unsigned long)(resource_size(&crashk_res) >> 20), (unsigned long)(crashk_res.start >> 20)); } -#else /* !defined(CONFIG_KEXEC) */ -static void __init mips_parse_crashkernel(void) -{ -} - -static void __init request_crashkernel(struct resource *res) -{ -} -#endif /* !defined(CONFIG_KEXEC) */ static void __init check_kernel_sections_mem(void) { @@ -708,10 +704,7 @@ static void __init resource_init(void) for_each_mem_range(i, &start, &end) { struct resource *res; - res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct resource)); + res = 
memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES); res->start = start; /* @@ -750,12 +743,30 @@ static void __init prefill_possible_map(void) for (; i < NR_CPUS; i++) set_cpu_possible(i, false); - nr_cpu_ids = possible; + set_nr_cpu_ids(possible); } #else static inline void prefill_possible_map(void) {} #endif +static void __init setup_rng_seed(void) +{ + char *rng_seed_hex = fw_getenv("rngseed"); + u8 rng_seed[512]; + size_t len; + + if (!rng_seed_hex) + return; + + len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2); + if (hex2bin(rng_seed, rng_seed_hex, len)) + return; + + add_bootloader_randomness(rng_seed, len); + memzero_explicit(rng_seed, len); + memzero_explicit(rng_seed_hex, len * 2); +} + void __init setup_arch(char **cmdline_p) { cpu_probe(); @@ -767,13 +778,8 @@ void __init setup_arch(char **cmdline_p) setup_early_printk(); #endif cpu_report(); - check_bugs_early(); - -#if defined(CONFIG_VT) -#if defined(CONFIG_VGA_CONSOLE) - conswitchp = &vga_con; -#endif -#endif + if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) + check_bugs64_early(); arch_mem_init(cmdline_p); dmi_setup(); @@ -786,6 +792,8 @@ void __init setup_arch(char **cmdline_p) paging_init(); memblock_dump_all(); + + setup_rng_seed(); } unsigned long kernelsp[NR_CPUS]; @@ -818,3 +826,14 @@ static int __init setnocoherentio(char *str) } early_param("nocoherentio", setnocoherentio); #endif + +void __init arch_cpu_finalize_init(void) +{ + unsigned int cpu = smp_processor_id(); + + cpu_data[cpu].udelay_val = loops_per_jiffy; + check_bugs32(); + + if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) + check_bugs64(); +} diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h index f50d48435c68..136eb20ac024 100644 --- a/arch/mips/kernel/signal-common.h +++ b/arch/mips/kernel/signal-common.h @@ -40,4 +40,7 @@ _restore_fp_context(void __user *fpregs, void __user *csr); extern asmlinkage int _save_msa_all_upper(void __user *buf); extern asmlinkage int _restore_msa_all_upper(void __user *buf); +extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); +extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); + #endif /* __SIGNAL_COMMON_H */ diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 479999b7f2de..4a10f18a8806 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -38,6 +38,7 @@ #include <asm/dsp.h> #include <asm/inst.h> #include <asm/msa.h> +#include <asm/syscalls.h> #include "signal-common.h" @@ -569,7 +570,7 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, return (void __user __force *)(-1UL); /* - * FPU emulator may have it's own trampoline active just + * FPU emulator may have its own trampoline active just * above the user stack, 16-bytes before the next lowest * 16 byte boundary. Try to avoid trashing it. 
*/ diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 59b8965433c2..73081d4ee8c1 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -18,6 +18,7 @@ #include <asm/compat-signal.h> #include <linux/uaccess.h> #include <asm/unistd.h> +#include <asm/syscalls.h> #include "signal-common.h" diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index cfc77b69420a..139d2596b0d4 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -24,6 +24,7 @@ #include <asm/ucontext.h> #include <asm/fpu.h> #include <asm/cpu-features.h> +#include <asm/syscalls.h> #include "signal-common.h" @@ -32,9 +33,6 @@ */ #define __NR_N32_restart_syscall 6214 -extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); -extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); - struct ucontextn32 { u32 uc_flags; s32 uc_link; diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c index 299a7a28ca33..4f0458459650 100644 --- a/arch/mips/kernel/signal_o32.c +++ b/arch/mips/kernel/signal_o32.c @@ -19,6 +19,7 @@ #include <asm/dsp.h> #include <asm/sim.h> #include <asm/unistd.h> +#include <asm/syscalls.h> #include "signal-common.h" diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index f5d7bfa3472a..35b8d810833c 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -54,6 +54,8 @@ static void bmips_set_reset_vec(int cpu, u32 val); #ifdef CONFIG_SMP +#include <asm/smp.h> + /* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */ unsigned long bmips_smp_boot_sp; unsigned long bmips_smp_boot_gp; @@ -390,6 +392,7 @@ static void bmips_cpu_die(unsigned int cpu) void __ref play_dead(void) { idle_task_exit(); + cpuhp_ap_report_dead(); /* flush data cache */ _dma_cache_wback_inv(0, ~0); @@ -413,6 +416,8 @@ void __ref play_dead(void) " wait\n" " j bmips_secondary_reentry\n" : : : "memory"); + + BUG(); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -429,7 +434,7 @@ const struct plat_smp_ops bmips43xx_smp_ops = { .cpu_disable = bmips_cpu_disable, .cpu_die = bmips_cpu_die, #endif -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, #endif }; @@ -446,7 +451,7 @@ const struct plat_smp_ops bmips5000_smp_ops = { .cpu_disable = bmips_cpu_disable, .cpu_die = bmips_cpu_die, #endif -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, #endif }; @@ -513,7 +518,7 @@ static void bmips_set_reset_vec(int cpu, u32 val) info.val = val; bmips_set_reset_vec_remote(&info); } else { - void __iomem *cbr = BMIPS_GET_CBR(); + void __iomem *cbr = bmips_cbr_addr; if (cpu == 0) __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0); @@ -586,7 +591,8 @@ asmlinkage void __weak plat_wired_tlb_setup(void) void bmips_cpu_setup(void) { - void __iomem __maybe_unused *cbr = BMIPS_GET_CBR(); + void __iomem __maybe_unused *cbr = bmips_cbr_addr; + u32 __maybe_unused rac_addr; u32 __maybe_unused cfg; switch (current_cpu_type()) { @@ -615,6 +621,23 @@ void bmips_cpu_setup(void) __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE); break; + case CPU_BMIPS4350: + rac_addr = BMIPS_RAC_CONFIG_1; + + if (!(read_c0_brcm_cmt_local() & (1 << 31))) + rac_addr = BMIPS_RAC_CONFIG; + + /* Enable data RAC */ + cfg = __raw_readl(cbr + rac_addr); + __raw_writel(cfg | 0xf, cbr + rac_addr); + __raw_readl(cbr + rac_addr); + + /* Flush stale data out of the readahead cache */ + cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); + __raw_writel(cfg | 
0x100, cbr + BMIPS_RAC_CONFIG); + __raw_readl(cbr + BMIPS_RAC_CONFIG); + break; + case CPU_BMIPS4380: /* CBG workaround for early BMIPS4380 CPUs */ switch (read_c0_prid()) { diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c deleted file mode 100644 index 76f5824cdb00..000000000000 --- a/arch/mips/kernel/smp-cmp.c +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * - * Copyright (C) 2007 MIPS Technologies, Inc. - * Chris Dearman (chris@mips.com) - */ - -#undef DEBUG - -#include <linux/kernel.h> -#include <linux/sched/task_stack.h> -#include <linux/smp.h> -#include <linux/cpumask.h> -#include <linux/interrupt.h> -#include <linux/compiler.h> - -#include <linux/atomic.h> -#include <asm/cacheflush.h> -#include <asm/cpu.h> -#include <asm/processor.h> -#include <asm/hardirq.h> -#include <asm/mmu_context.h> -#include <asm/smp.h> -#include <asm/time.h> -#include <asm/mipsregs.h> -#include <asm/mipsmtregs.h> -#include <asm/mips_mt.h> -#include <asm/amon.h> - -static void cmp_init_secondary(void) -{ - struct cpuinfo_mips *c __maybe_unused = ¤t_cpu_data; - - /* Assume GIC is present */ - change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | - STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); - - /* Enable per-cpu interrupts: platform specific */ - -#ifdef CONFIG_MIPS_MT_SMP - if (cpu_has_mipsmt) - cpu_set_vpe_id(c, (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & - TCBIND_CURVPE); -#endif -} - -static void cmp_smp_finish(void) -{ - pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); - - /* CDFIXME: remove this? */ - write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); - -#ifdef CONFIG_MIPS_MT_FPAFF - /* If we have an FPU, enroll ourselves in the FPU-full mask */ - if (cpu_has_fpu) - cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); -#endif /* CONFIG_MIPS_MT_FPAFF */ - - local_irq_enable(); -} - -/* - * Setup the PC, SP, and GP of a secondary processor and start it running - * smp_bootstrap is the place to resume from - * __KSTK_TOS(idle) is apparently the stack pointer - * (unsigned long)idle->thread_info the gp - */ -static int cmp_boot_secondary(int cpu, struct task_struct *idle) -{ - struct thread_info *gp = task_thread_info(idle); - unsigned long sp = __KSTK_TOS(idle); - unsigned long pc = (unsigned long)&smp_bootstrap; - unsigned long a0 = 0; - - pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(), - __func__, cpu); - -#if 0 - /* Needed? 
*/ - flush_icache_range((unsigned long)gp, - (unsigned long)(gp + sizeof(struct thread_info))); -#endif - - amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0); - return 0; -} - -/* - * Common setup before any secondaries are started - */ -void __init cmp_smp_setup(void) -{ - int i; - int ncpu = 0; - - pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); - -#ifdef CONFIG_MIPS_MT_FPAFF - /* If we have an FPU, enroll ourselves in the FPU-full mask */ - if (cpu_has_fpu) - cpumask_set_cpu(0, &mt_fpu_cpumask); -#endif /* CONFIG_MIPS_MT_FPAFF */ - - for (i = 1; i < NR_CPUS; i++) { - if (amon_cpu_avail(i)) { - set_cpu_possible(i, true); - __cpu_number_map[i] = ++ncpu; - __cpu_logical_map[ncpu] = i; - } - } - - if (cpu_has_mipsmt) { - unsigned int nvpe = 1; -#ifdef CONFIG_MIPS_MT_SMP - unsigned int mvpconf0 = read_c0_mvpconf0(); - - nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; -#endif - smp_num_siblings = nvpe; - } - pr_info("Detected %i available secondary CPU(s)\n", ncpu); -} - -void __init cmp_prepare_cpus(unsigned int max_cpus) -{ - pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", - smp_processor_id(), __func__, max_cpus); - -#ifdef CONFIG_MIPS_MT - /* - * FIXME: some of these options are per-system, some per-core and - * some per-cpu - */ - mips_mt_set_cpuoptions(); -#endif - -} - -const struct plat_smp_ops cmp_smp_ops = { - .send_ipi_single = mips_smp_send_ipi_single, - .send_ipi_mask = mips_smp_send_ipi_mask, - .init_secondary = cmp_init_secondary, - .smp_finish = cmp_smp_finish, - .boot_secondary = cmp_boot_secondary, - .smp_setup = cmp_smp_setup, - .prepare_cpus = cmp_prepare_cpus, -}; diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index bcd6a944b839..22d4f9ff3ae2 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -7,6 +7,7 @@ #include <linux/cpu.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/memblock.h> #include <linux/sched/task_stack.h> #include <linux/sched/hotplug.h> #include <linux/slab.h> @@ -20,34 +21,196 @@ #include <asm/mipsregs.h> #include <asm/pm-cps.h> #include <asm/r4kcache.h> +#include <asm/regdef.h> +#include <asm/smp.h> #include <asm/smp-cps.h> #include <asm/time.h> #include <asm/uasm.h> -static bool threads_disabled; -static DECLARE_BITMAP(core_power, NR_CPUS); +#define BEV_VEC_SIZE 0x500 +#define BEV_VEC_ALIGN 0x1000 -struct core_boot_config *mips_cps_core_bootcfg; +enum label_id { + label_not_nmi = 1, +}; + +UASM_L_LA(_not_nmi) + +static u64 core_entry_reg; +static phys_addr_t cps_vec_pa; -static int __init setup_nothreads(char *s) +struct cluster_boot_config *mips_cps_cluster_bootcfg; + +static void power_up_other_cluster(unsigned int cluster) { - threads_disabled = true; + u32 stat, seq_state; + unsigned int timeout; + + mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0, + CM_GCR_Cx_OTHER_BLOCK_LOCAL); + stat = read_cpc_co_stat_conf(); + mips_cm_unlock_other(); + + seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE; + seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); + if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5) + return; + + /* Set endianness & power up the CM */ + mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)); + write_cpc_redir_pwrup_ctl(1); + mips_cm_unlock_other(); + + /* Wait for the CM to start up */ + timeout = 1000; + mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0, + CM_GCR_Cx_OTHER_BLOCK_LOCAL); + while (1) { + stat = read_cpc_co_stat_conf(); + seq_state = stat & 
CPC_Cx_STAT_CONF_SEQSTATE; + seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); + if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5) + break; + + if (timeout) { + mdelay(1); + timeout--; + } else { + pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n", + cluster, stat); + mdelay(1000); + } + } + + mips_cm_unlock_other(); +} + +static unsigned __init core_vpe_count(unsigned int cluster, unsigned core) +{ + return min(smp_max_threads, mips_cps_numvps(cluster, core)); +} + +static void __init *mips_cps_build_core_entry(void *addr) +{ + extern void (*nmi_handler)(void); + u32 *p = addr; + u32 val; + struct uasm_label labels[2]; + struct uasm_reloc relocs[2]; + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); + + uasm_i_mfc0(&p, GPR_K0, C0_STATUS); + UASM_i_LA(&p, GPR_T9, ST0_NMI); + uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9); + + uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi); + uasm_i_nop(&p); + UASM_i_LA(&p, GPR_K0, (long)&nmi_handler); + + uasm_l_not_nmi(&l, p); + + val = CAUSEF_IV; + uasm_i_lui(&p, GPR_K0, val >> 16); + uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff); + uasm_i_mtc0(&p, GPR_K0, C0_CAUSE); + val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64; + uasm_i_lui(&p, GPR_K0, val >> 16); + uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff); + uasm_i_mtc0(&p, GPR_K0, C0_STATUS); + uasm_i_ehb(&p); + uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK); + UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base); +#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT) + UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot))); +#else + UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot))); +#endif + uasm_i_jr(&p, GPR_T9); + uasm_i_nop(&p); + + uasm_resolve_relocs(relocs, labels); + + return p; +} + +static bool __init check_64bit_reset(void) +{ + bool cx_64bit_reset = false; + + mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); + write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE); + if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) == + CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) + cx_64bit_reset = true; + mips_cm_unlock_other(); + + return cx_64bit_reset; +} + +static int __init allocate_cps_vecs(void) +{ + /* Try to allocate in KSEG1 first */ + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, + 0x0, CSEGX_SIZE - 1); + + if (cps_vec_pa) + core_entry_reg = CKSEG1ADDR(cps_vec_pa) & + CM_GCR_Cx_RESET_BASE_BEVEXCBASE; + + if (!cps_vec_pa && mips_cm_is64) { + phys_addr_t end; + + if (check_64bit_reset()) { + pr_info("VP Local Reset Exception Base support 47 bits address\n"); + end = MEMBLOCK_ALLOC_ANYWHERE; + } else { + end = SZ_4G - 1; + } + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end); + if (cps_vec_pa) { + if (check_64bit_reset()) + core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) | + CM_GCR_Cx_RESET_BASE_MODE; + else + core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) | + CM_GCR_Cx_RESET_BASE_MODE; + } + } + + if (!cps_vec_pa) + return -ENOMEM; + return 0; } -early_param("nothreads", setup_nothreads); -static unsigned core_vpe_count(unsigned int cluster, unsigned core) +static void __init setup_cps_vecs(void) { - if (threads_disabled) - return 1; + void *cps_vec; + + cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa); + mips_cps_build_core_entry(cps_vec); - return mips_cps_numvps(cluster, core); + memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80); + memcpy(cps_vec + 0x280, &excep_xtlbfill, 
0x80); + memcpy(cps_vec + 0x300, &excep_cache, 0x80); + memcpy(cps_vec + 0x380, &excep_genex, 0x80); + memcpy(cps_vec + 0x400, &excep_intex, 0x80); + memcpy(cps_vec + 0x480, &excep_ejtag, 0x80); + + /* Make sure no prefetched data in cache */ + blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE); + bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE); + __sync(); } static void __init cps_smp_setup(void) { unsigned int nclusters, ncores, nvpes, core_vpes; - unsigned long core_entry; int cl, c, v; /* Detect & record VPE topology */ @@ -59,6 +222,9 @@ static void __init cps_smp_setup(void) pr_cont(","); pr_cont("{"); + if (mips_cm_revision() >= CM_REV_CM3_5) + power_up_other_cluster(cl); + ncores = mips_cps_numcores(cl); for (c = 0; c < ncores; c++) { core_vpes = core_vpe_count(cl, c); @@ -70,6 +236,7 @@ static void __init cps_smp_setup(void) /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */ if (!cl && !c) smp_num_siblings = core_vpes; + cpumask_set_cpu(nvpes, &__cpu_primary_thread_mask); for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { cpu_set_cluster(&cpu_data[nvpes + v], cl); @@ -86,8 +253,8 @@ static void __init cps_smp_setup(void) /* Indicate present CPUs (CPU being synonymous with VPE) */ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) { - set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0); - set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0); + set_cpu_possible(v, true); + set_cpu_present(v, true); __cpu_number_map[v] = v; __cpu_logical_map[v] = v; } @@ -95,19 +262,17 @@ static void __init cps_smp_setup(void) /* Set a coherent default CCA (CWB) */ change_c0_config(CONF_CM_CMASK, 0x5); - /* Core 0 is powered up (we're running on it) */ - bitmap_set(core_power, 0, 1); - /* Initialise core 0 */ mips_cps_core_init(); /* Make core 0 coherent with everything */ write_gcr_cl_coherence(0xff); - if (mips_cm_revision() >= CM_REV_CM3) { - core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); - write_gcr_bev_base(core_entry); - } + if (allocate_cps_vecs()) + pr_err("Failed to allocate CPS vectors\n"); + + if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3) + write_gcr_bev_base(core_entry_reg); #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ @@ -116,14 +281,31 @@ static void __init cps_smp_setup(void) #endif /* CONFIG_MIPS_MT_FPAFF */ } +unsigned long calibrate_delay_is_known(void) +{ + int first_cpu_cluster = 0; + + /* The calibration has to be done on the primary CPU of the cluster */ + if (mips_cps_first_online_in_cluster(&first_cpu_cluster)) + return 0; + + return cpu_data[first_cpu_cluster].udelay_val; +} + static void __init cps_prepare_cpus(unsigned int max_cpus) { - unsigned ncores, core_vpes, c, cca; + unsigned int nclusters, ncores, core_vpes, nvpe = 0, c, cl, cca; bool cca_unsuitable, cores_limited; - u32 *entry_code; + struct cluster_boot_config *cluster_bootcfg; + struct core_boot_config *core_bootcfg; mips_mt_set_cpuoptions(); + if (!core_entry_reg) { + pr_err("core_entry address unsuitable, disabling smp-cps\n"); + goto err_out; + } + /* Detect whether the CCA is unsuited to multi-core SMP */ cca = read_c0_config() & CONF_CM_CMASK; switch (cca) { @@ -155,53 +337,72 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", cpu_has_dc_aliases ? 
"dcache aliasing" : ""); - /* - * Patch the start of mips_cps_core_entry to provide: - * - * s0 = kseg0 CCA - */ - entry_code = (u32 *)&mips_cps_core_entry; - uasm_i_addiu(&entry_code, 16, 0, cca); - blast_dcache_range((unsigned long)&mips_cps_core_entry, - (unsigned long)entry_code); - bc_wback_inv((unsigned long)&mips_cps_core_entry, - (void *)entry_code - (void *)&mips_cps_core_entry); - __sync(); + setup_cps_vecs(); - /* Allocate core boot configuration structs */ - ncores = mips_cps_numcores(0); - mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), - GFP_KERNEL); - if (!mips_cps_core_bootcfg) { - pr_err("Failed to allocate boot config for %u cores\n", ncores); + /* Allocate cluster boot configuration structs */ + nclusters = mips_cps_numclusters(); + mips_cps_cluster_bootcfg = kcalloc(nclusters, + sizeof(*mips_cps_cluster_bootcfg), + GFP_KERNEL); + if (!mips_cps_cluster_bootcfg) goto err_out; - } - /* Allocate VPE boot configuration structs */ - for (c = 0; c < ncores; c++) { - core_vpes = core_vpe_count(0, c); - mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, - sizeof(*mips_cps_core_bootcfg[c].vpe_config), + if (nclusters > 1) + mips_cm_update_property(); + + for (cl = 0; cl < nclusters; cl++) { + /* Allocate core boot configuration structs */ + ncores = mips_cps_numcores(cl); + core_bootcfg = kcalloc(ncores, sizeof(*core_bootcfg), + GFP_KERNEL); + if (!core_bootcfg) + goto err_out; + mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg; + + mips_cps_cluster_bootcfg[cl].core_power = + kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long), GFP_KERNEL); - if (!mips_cps_core_bootcfg[c].vpe_config) { - pr_err("Failed to allocate %u VPE boot configs\n", - core_vpes); + if (!mips_cps_cluster_bootcfg[cl].core_power) goto err_out; + + /* Allocate VPE boot configuration structs */ + for (c = 0; c < ncores; c++) { + int v; + core_vpes = core_vpe_count(cl, c); + core_bootcfg[c].vpe_config = kcalloc(core_vpes, + sizeof(*core_bootcfg[c].vpe_config), + GFP_KERNEL); + for (v = 0; v < core_vpes; v++) + cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask); + if (!core_bootcfg[c].vpe_config) + goto err_out; } } - /* Mark this CPU as booted */ - atomic_set(&mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)].vpe_mask, - 1 << cpu_vpe_id(¤t_cpu_data)); + /* Mark this CPU as powered up & booted */ + cl = cpu_cluster(¤t_cpu_data); + c = cpu_core(¤t_cpu_data); + cluster_bootcfg = &mips_cps_cluster_bootcfg[cl]; + cpu_smt_set_num_threads(core_vpes, core_vpes); + core_bootcfg = &cluster_bootcfg->core_config[c]; + bitmap_set(cluster_bootcfg->core_power, cpu_core(¤t_cpu_data), 1); + atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(¤t_cpu_data)); return; err_out: /* Clean up allocations */ - if (mips_cps_core_bootcfg) { - for (c = 0; c < ncores; c++) - kfree(mips_cps_core_bootcfg[c].vpe_config); - kfree(mips_cps_core_bootcfg); - mips_cps_core_bootcfg = NULL; + if (mips_cps_cluster_bootcfg) { + for (cl = 0; cl < nclusters; cl++) { + cluster_bootcfg = &mips_cps_cluster_bootcfg[cl]; + ncores = mips_cps_numcores(cl); + for (c = 0; c < ncores; c++) { + core_bootcfg = &cluster_bootcfg->core_config[c]; + kfree(core_bootcfg->vpe_config); + } + kfree(mips_cps_cluster_bootcfg[c].core_config); + } + kfree(mips_cps_cluster_bootcfg); + mips_cps_cluster_bootcfg = NULL; } /* Effectively disable SMP by declaring CPUs not present */ @@ -212,16 +413,124 @@ err_out: } } -static void boot_core(unsigned int core, unsigned int vpe_id) +static void init_cluster_l2(void) { - u32 stat, seq_state; - 
unsigned timeout; + u32 l2_cfg, l2sm_cop, result; + + while (!mips_cm_is_l2_hci_broken) { + l2_cfg = read_gcr_redir_l2_ram_config(); + + /* If HCI is not supported, use the state machine below */ + if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT)) + break; + if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED)) + break; + + /* If the HCI_DONE bit is set, we're finished */ + if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE) + return; + } + + l2sm_cop = read_gcr_redir_l2sm_cop(); + if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT), + "L2 init not supported on this system yet")) + return; + + /* Clear L2 tag registers */ + write_gcr_redir_l2_tag_state(0); + write_gcr_redir_l2_ecc(0); + + /* Ensure the L2 tag writes complete before the state machine starts */ + mb(); + + /* Wait for the L2 state machine to be idle */ + do { + l2sm_cop = read_gcr_redir_l2sm_cop(); + } while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING); + + /* Start a store tag operation */ + l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG; + l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE); + l2sm_cop |= CM_GCR_L2SM_COP_CMD_START; + write_gcr_redir_l2sm_cop(l2sm_cop); + + /* Ensure the state machine starts before we poll for completion */ + mb(); + + /* Wait for the operation to be complete */ + do { + l2sm_cop = read_gcr_redir_l2sm_cop(); + result = l2sm_cop & CM_GCR_L2SM_COP_RESULT; + result >>= __ffs(CM_GCR_L2SM_COP_RESULT); + } while (!result); + + WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK, + "L2 state machine failed cache init with error %u\n", result); +} + +static void boot_core(unsigned int cluster, unsigned int core, + unsigned int vpe_id) +{ + struct cluster_boot_config *cluster_cfg; + u32 access, stat, seq_state; + unsigned int timeout, ncores; + + cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; + ncores = mips_cps_numcores(cluster); + + if ((cluster != cpu_cluster(¤t_cpu_data)) && + bitmap_empty(cluster_cfg->core_power, ncores)) { + power_up_other_cluster(cluster); + + mips_cm_lock_other(cluster, core, 0, + CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + + /* Ensure cluster GCRs are where we expect */ + write_gcr_redir_base(read_gcr_base()); + write_gcr_redir_cpc_base(read_gcr_cpc_base()); + write_gcr_redir_gic_base(read_gcr_gic_base()); + + init_cluster_l2(); + + /* Mirror L2 configuration */ + write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base()); + write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control()); + write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b()); + + /* Mirror ECC/parity setup */ + write_gcr_redir_err_control(read_gcr_err_control()); + + /* Set BEV base */ + write_gcr_redir_bev_base(core_entry_reg); + + mips_cm_unlock_other(); + } + + if (cluster != cpu_cluster(¤t_cpu_data)) { + mips_cm_lock_other(cluster, core, 0, + CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + + /* Ensure the core can access the GCRs */ + access = read_gcr_redir_access(); + access |= BIT(core); + write_gcr_redir_access(access); + + mips_cm_unlock_other(); + } else { + /* Ensure the core can access the GCRs */ + access = read_gcr_access(); + access |= BIT(core); + write_gcr_access(access); + } /* Select the appropriate core */ - mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); + mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); /* Set its reset vector */ - write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); + if (mips_cm_is64) + write_gcr_co_reset64_base(core_entry_reg); + else + write_gcr_co_reset_base(core_entry_reg); /* Ensure its coherency is disabled */ write_gcr_co_coherence(0); @@ -230,7 +539,10 @@ static void 
boot_core(unsigned int core, unsigned int vpe_id) write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB); /* Ensure the core can access the GCRs */ - set_gcr_access(1 << core); + if (mips_cm_revision() < CM_REV_CM3) + set_gcr_access(1 << core); + else + set_gcr_access_cm3(1 << core); if (mips_cpc_present()) { /* Reset the core */ @@ -281,31 +593,42 @@ static void boot_core(unsigned int core, unsigned int vpe_id) mips_cm_unlock_other(); /* The core is now powered up */ - bitmap_set(core_power, core, 1); + bitmap_set(cluster_cfg->core_power, core, 1); + + /* + * Restore CM_PWRUP=0 so that the CM can power down if all the cores in + * the cluster do (eg. if they're all removed via hotplug. + */ + if (mips_cm_revision() >= CM_REV_CM3_5) { + mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + write_cpc_redir_pwrup_ctl(0); + mips_cm_unlock_other(); + } } static void remote_vpe_boot(void *dummy) { + unsigned int cluster = cpu_cluster(&current_cpu_data); unsigned core = cpu_core(&current_cpu_data); - struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; + struct cluster_boot_config *cluster_cfg = + &mips_cps_cluster_bootcfg[cluster]; + struct core_boot_config *core_cfg = &cluster_cfg->core_config[core]; mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data)); } static int cps_boot_secondary(int cpu, struct task_struct *idle) { + unsigned int cluster = cpu_cluster(&cpu_data[cpu]); unsigned core = cpu_core(&cpu_data[cpu]); unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); - struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; + struct cluster_boot_config *cluster_cfg = + &mips_cps_cluster_bootcfg[cluster]; + struct core_boot_config *core_cfg = &cluster_cfg->core_config[core]; struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; - unsigned long core_entry; unsigned int remote; int err; - /* We don't yet support booting CPUs in other clusters */ - if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data)) - return -ENOSYS; - vpe_cfg->pc = (unsigned long)&smp_bootstrap; vpe_cfg->sp = __KSTK_TOS(idle); vpe_cfg->gp = (unsigned long)task_thread_info(idle); @@ -314,16 +637,19 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle) preempt_disable(); - if (!test_bit(core, core_power)) { + if (!test_bit(core, cluster_cfg->core_power)) { /* Boot a VPE on a powered down core */ - boot_core(core, vpe_id); + boot_core(cluster, core, vpe_id); goto out; } if (cpu_has_vp) { - mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); - core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); - write_gcr_co_reset_base(core_entry); + mips_cm_lock_other(cluster, core, vpe_id, + CM_GCR_Cx_OTHER_BLOCK_LOCAL); + if (mips_cm_is64) + write_gcr_co_reset64_base(core_entry_reg); + else + write_gcr_co_reset_base(core_entry_reg); mips_cm_unlock_other(); } @@ -359,6 +685,8 @@ out: static void cps_init_secondary(void) { + int core = cpu_core(&current_cpu_data); + /* Disable MT - we only want to run 1 TC per VPE */ if (cpu_has_mipsmt) dmt(); @@ -374,6 +702,9 @@ static void cps_init_secondary(void) BUG_ON(ident != mips_cm_vp_id(smp_processor_id())); } + if (core > 0 && !read_gcr_cl_coherence()) + pr_warn("Core %u is not in coherent domain\n", core); + if (cpu_has_veic) clear_c0_status(ST0_IM); else @@ -395,7 +726,7 @@ static void cps_smp_finish(void) local_irq_enable(); } -#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC) +#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE) enum cpu_death { CPU_DEATH_HALT, @@ -424,13 +755,15 @@ static void
cps_shutdown_this_cpu(enum cpu_death death) wmb(); } } else { - pr_debug("Gating power to core %d\n", core); - /* Power down the core */ - cps_pm_enter_state(CPS_PM_POWER_GATED); + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { + pr_debug("Gating power to core %d\n", core); + /* Power down the core */ + cps_pm_enter_state(CPS_PM_POWER_GATED); + } } } -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE static void cps_kexec_nonboot_cpu(void) { @@ -440,21 +773,23 @@ static void cps_kexec_nonboot_cpu(void) cps_shutdown_this_cpu(CPU_DEATH_POWER); } -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ -#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */ +#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */ #ifdef CONFIG_HOTPLUG_CPU static int cps_cpu_disable(void) { unsigned cpu = smp_processor_id(); + struct cluster_boot_config *cluster_cfg; struct core_boot_config *core_cfg; if (!cps_pm_support_state(CPS_PM_POWER_GATED)) return -EINVAL; - core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)]; + cluster_cfg = &mips_cps_cluster_bootcfg[cpu_cluster(&current_cpu_data)]; + core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)]; atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); smp_mb__after_atomic(); set_cpu_online(cpu, false); @@ -493,8 +828,7 @@ void play_dead(void) } } - /* This CPU has chosen its way out */ - (void)cpu_report_death(); + cpuhp_ap_report_dead(); cps_shutdown_this_cpu(cpu_death); @@ -517,19 +851,19 @@ static void wait_for_sibling_halt(void *ptr_cpu) } while (!(halted & TCHALT_H)); } -static void cps_cpu_die(unsigned int cpu) +static void cps_cpu_die(unsigned int cpu) { } + +static void cps_cleanup_dead_cpu(unsigned cpu) { + unsigned int cluster = cpu_cluster(&cpu_data[cpu]); unsigned core = cpu_core(&cpu_data[cpu]); unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); ktime_t fail_time; unsigned stat; int err; + struct cluster_boot_config *cluster_cfg; - /* Wait for the cpu to choose its way out */ - if (!cpu_wait_death(cpu, 5)) { - pr_err("CPU%u: didn't offline\n", cpu); - return; - } + cluster_cfg = &mips_cps_cluster_bootcfg[cluster]; /* * Now wait for the CPU to actually offline.
Without doing this that @@ -581,7 +915,7 @@ static void cps_cpu_die(unsigned int cpu) } while (1); /* Indicate the core is powered off */ - bitmap_clear(core_power, core, 1); + bitmap_clear(cluster_cfg->core_power, core, 1); } else if (cpu_has_mipsmt) { /* * Have a CPU with access to the offlined CPUs registers wait @@ -614,8 +948,9 @@ static const struct plat_smp_ops cps_smp_ops = { #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = cps_cpu_disable, .cpu_die = cps_cpu_die, + .cleanup_dead_cpu = cps_cleanup_dead_cpu, #endif -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE .kexec_nonboot_cpu = cps_kexec_nonboot_cpu, #endif }; diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 5f04a0141068..7729cc733421 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -46,7 +46,8 @@ static void __init smvp_copy_vpe_config(void) static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, unsigned int ncpu) { - if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) + if (tc >= smp_max_threads || + (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))) return ncpu; /* Deactivate all but VPE 0 */ diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 1d93b85271ba..4868e79f3b30 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -10,6 +10,7 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/profile.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/threads.h> @@ -55,8 +56,10 @@ EXPORT_SYMBOL(cpu_sibling_map); cpumask_t cpu_core_map[NR_CPUS] __read_mostly; EXPORT_SYMBOL(cpu_core_map); +#ifndef CONFIG_HOTPLUG_PARALLEL static DECLARE_COMPLETION(cpu_starting); static DECLARE_COMPLETION(cpu_running); +#endif /* * A logical cpu mask containing only one VPE per core to @@ -73,6 +76,26 @@ static cpumask_t cpu_core_setup_map; cpumask_t cpu_coherent_mask; +struct cpumask __cpu_primary_thread_mask __read_mostly; + +unsigned int smp_max_threads __initdata = UINT_MAX; + +static int __init early_nosmt(char *s) +{ + smp_max_threads = 1; + return 0; +} +early_param("nosmt", early_nosmt); + +static int __init early_smt(char *s) +{ + get_option(&s, &smp_max_threads); + /* Ensure at least one thread is available */ + smp_max_threads = clamp_val(smp_max_threads, 1U, UINT_MAX); + return 0; +} +early_param("smt", early_smt); + #ifdef CONFIG_GENERIC_IRQ_IPI static struct irq_desc *call_desc; static struct irq_desc *sched_desc; @@ -333,10 +356,11 @@ early_initcall(mips_smp_ipi_init); */ asmlinkage void start_secondary(void) { - unsigned int cpu; + unsigned int cpu = raw_smp_processor_id(); cpu_probe(); per_cpu_trap_init(false); + rcutree_report_cpu_starting(cpu); mips_clockevent_init(); mp_ops->init_secondary(); cpu_report(); @@ -347,8 +371,10 @@ asmlinkage void start_secondary(void) * to an option instead of something based on .cputype */ +#ifdef CONFIG_HOTPLUG_PARALLEL + cpuhp_ap_sync_alive(); +#endif calibrate_delay(); - cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; set_cpu_sibling_map(cpu); @@ -357,8 +383,10 @@ asmlinkage void start_secondary(void) cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); +#ifndef CONFIG_HOTPLUG_PARALLEL /* Notify boot CPU that we're starting & ready to sync counters */ complete(&cpu_starting); +#endif synchronise_count_slave(cpu); @@ -367,11 +395,13 @@ asmlinkage void start_secondary(void) calculate_cpu_foreign_map(); +#ifndef CONFIG_HOTPLUG_PARALLEL /* * Notify boot CPU that we're up & online and it can 
safely return * from __cpu_up */ complete(&cpu_running); +#endif /* * irq will be enabled in ->smp_finish(), enabling it too early @@ -420,7 +450,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } /* preload SMP state for boot cpu */ -void smp_prepare_boot_cpu(void) +void __init smp_prepare_boot_cpu(void) { if (mp_ops->prepare_boot_cpu) mp_ops->prepare_boot_cpu(); @@ -428,6 +458,12 @@ void smp_prepare_boot_cpu(void) set_cpu_online(0, true); } +#ifdef CONFIG_HOTPLUG_PARALLEL +int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle) +{ + return mp_ops->boot_secondary(cpu, tidle); +} +#else int __cpu_up(unsigned int cpu, struct task_struct *tidle) { int err; @@ -443,18 +479,19 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) return -EIO; } - synchronise_count_master(cpu); - /* Wait for CPU to finish startup & mark itself online before return */ wait_for_completion(&cpu_running); return 0; } +#endif +#ifdef CONFIG_PROFILING /* Not really SMP stuff ... */ int setup_profiling_timer(unsigned int multiplier) { return 0; } +#endif static void flush_tlb_all_ipi(void *info) { @@ -690,6 +727,14 @@ void flush_tlb_one(unsigned long vaddr) EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(flush_tlb_one); +#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD +void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) +{ + if (mp_ops->cleanup_dead_cpu) + mp_ops->cleanup_dead_cpu(cpu); +} +#endif + #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST static void tick_broadcast_callee(void *info) diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index d5d96214cce5..dd31e3fffd24 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -12,6 +12,7 @@ #include <asm/mipsregs.h> #include <asm/r4kcache.h> #include <asm/hazards.h> +#include <asm/spram.h> /* * These definitions are correct for the 24K/34K/74K SPRAM sample @@ -25,10 +26,6 @@ #define ERRCTL_SPRAM (1 << 28) -/* errctl access */ -#define read_c0_errctl(x) read_c0_ecc(x) -#define write_c0_errctl(x) write_c0_ecc(x) - /* * Different semantics to the set_c0_* function built by __BUILD_SET_C0 */ diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index abdd7aaa3311..39156592582e 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -2,121 +2,244 @@ /* * Count register synchronisation. * - * All CPUs will have their count registers synchronised to the CPU0 next time - * value. This can cause a small timewarp for CPU0. All other CPU's should - * not have done anything significant (but they may have had interrupts - * enabled briefly - prom_smp_finish() should not be responsible for enabling - * interrupts...) 
+ * Derived from arch/x86/kernel/tsc_sync.c + * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar */ #include <linux/kernel.h> #include <linux/irqflags.h> #include <linux/cpumask.h> +#include <linux/atomic.h> +#include <linux/nmi.h> +#include <linux/smp.h> +#include <linux/spinlock.h> #include <asm/r4k-timer.h> -#include <linux/atomic.h> -#include <asm/barrier.h> #include <asm/mipsregs.h> +#include <asm/time.h> -static unsigned int initcount = 0; -static atomic_t count_count_start = ATOMIC_INIT(0); -static atomic_t count_count_stop = ATOMIC_INIT(0); - -#define COUNTON 100 -#define NR_LOOPS 3 - -void synchronise_count_master(int cpu) -{ - int i; - unsigned long flags; - - pr_info("Synchronize counters for CPU %u: ", cpu); +#define COUNTON 100 +#define NR_LOOPS 3 +#define LOOP_TIMEOUT 20 - local_irq_save(flags); +/* + * Entry/exit counters that make sure that both CPUs + * run the measurement code at once: + */ +static atomic_t start_count; +static atomic_t stop_count; +static atomic_t test_runs; - /* - * We loop a few times to get a primed instruction cache, - * then the last pass is more or less synchronised and - * the master and slaves each set their cycle counters to a known - * value all at once. This reduces the chance of having random offsets - * between the processors, and guarantees that the maximum - * delay between the cycle counters is never bigger than - * the latency of information-passing (cachelines) between - * two CPUs. - */ +/* + * We use a raw spinlock in this exceptional case, because + * we want to have the fastest, inlined, non-debug version + * of a critical section, to be able to prove counter time-warps: + */ +static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; - for (i = 0; i < NR_LOOPS; i++) { - /* slaves loop on '!= 2' */ - while (atomic_read(&count_count_start) != 1) - mb(); - atomic_set(&count_count_stop, 0); - smp_wmb(); +static uint32_t last_counter; +static uint32_t max_warp; +static int nr_warps; +static int random_warps; - /* Let the slave writes its count register */ - atomic_inc(&count_count_start); +/* + * Counter warp measurement loop running on both CPUs. + */ +static uint32_t check_counter_warp(void) +{ + uint32_t start, now, prev, end, cur_max_warp = 0; + int i, cur_warps = 0; - /* Count will be initialised to current timer */ - if (i == 1) - initcount = read_c0_count(); + start = read_c0_count(); + end = start + (uint32_t) mips_hpt_frequency / 1000 * LOOP_TIMEOUT; + for (i = 0; ; i++) { /* - * Everyone initialises count in the last loop: + * We take the global lock, measure counter, save the + * previous counter that was measured (possibly on + * another CPU) and update the previous counter timestamp. 
*/ - if (i == NR_LOOPS-1) - write_c0_count(initcount); + arch_spin_lock(&sync_lock); + prev = last_counter; + now = read_c0_count(); + last_counter = now; + arch_spin_unlock(&sync_lock); /* - * Wait for slave to leave the synchronization point: + * Be nice every now and then (and also check whether + * measurement is done [we also insert a 10 million + * loops safety exit, so we dont lock up in case the + * counter is totally broken]): */ - while (atomic_read(&count_count_stop) != 1) - mb(); - atomic_set(&count_count_start, 0); - smp_wmb(); - atomic_inc(&count_count_stop); + if (unlikely(!(i & 7))) { + if (now > end || i > 10000000) + break; + cpu_relax(); + touch_nmi_watchdog(); + } + /* + * Outside the critical section we can now see whether + * we saw a time-warp of the counter going backwards: + */ + if (unlikely(prev > now)) { + arch_spin_lock(&sync_lock); + max_warp = max(max_warp, prev - now); + cur_max_warp = max_warp; + /* + * Check whether this bounces back and forth. Only + * one CPU should observe time going backwards. + */ + if (cur_warps != nr_warps) + random_warps++; + nr_warps++; + cur_warps = nr_warps; + arch_spin_unlock(&sync_lock); + } + } + WARN(!(now-start), + "Warning: zero counter calibration delta: %d [max: %d]\n", + now-start, end-start); + return cur_max_warp; +} + +/* + * The freshly booted CPU initiates this via an async SMP function call. + */ +static void check_counter_sync_source(void *__cpu) +{ + unsigned int cpu = (unsigned long)__cpu; + int cpus = 2; + + atomic_set(&test_runs, NR_LOOPS); +retry: + /* Wait for the target to start. */ + while (atomic_read(&start_count) != cpus - 1) + cpu_relax(); + + /* + * Trigger the target to continue into the measurement too: + */ + atomic_inc(&start_count); + + check_counter_warp(); + + while (atomic_read(&stop_count) != cpus-1) + cpu_relax(); + + /* + * If the test was successful set the number of runs to zero and + * stop. If not, decrement the number of runs an check if we can + * retry. In case of random warps no retry is attempted. + */ + if (!nr_warps) { + atomic_set(&test_runs, 0); + + pr_info("Counter synchronization [CPU#%d -> CPU#%u]: passed\n", + smp_processor_id(), cpu); + } else if (atomic_dec_and_test(&test_runs) || random_warps) { + /* Force it to 0 if random warps brought us here */ + atomic_set(&test_runs, 0); + + pr_info("Counter synchronization [CPU#%d -> CPU#%u]:\n", + smp_processor_id(), cpu); + pr_info("Measured %d cycles counter warp between CPUs", max_warp); + if (random_warps) + pr_warn("Counter warped randomly between CPUs\n"); } - /* Arrange for an interrupt in a short while */ - write_c0_compare(read_c0_count() + COUNTON); - local_irq_restore(flags); + /* + * Reset it - just in case we boot another CPU later: + */ + atomic_set(&start_count, 0); + random_warps = 0; + nr_warps = 0; + max_warp = 0; + last_counter = 0; + + /* + * Let the target continue with the bootup: + */ + atomic_inc(&stop_count); /* - * i386 code reported the skew here, but the - * count registers were almost certainly out of sync - * so no point in alarming people + * Retry, if there is a chance to do so. 
*/ - pr_cont("done.\n"); + if (atomic_read(&test_runs) > 0) + goto retry; } +/* + * Freshly booted CPUs call into this: + */ void synchronise_count_slave(int cpu) { - int i; - unsigned long flags; + uint32_t cur_max_warp, gbl_max_warp, count; + int cpus = 2; - local_irq_save(flags); + if (!cpu_has_counter || !mips_hpt_frequency) + return; + /* Kick the control CPU into the counter synchronization function */ + smp_call_function_single(cpumask_first(cpu_online_mask), + check_counter_sync_source, + (unsigned long *)(unsigned long)cpu, 0); +retry: /* - * Not every cpu is online at the time this gets called, - * so we first wait for the master to say everyone is ready + * Register this CPU's participation and wait for the + * source CPU to start the measurement: */ + atomic_inc(&start_count); + while (atomic_read(&start_count) != cpus) + cpu_relax(); - for (i = 0; i < NR_LOOPS; i++) { - atomic_inc(&count_count_start); - while (atomic_read(&count_count_start) != 2) - mb(); + cur_max_warp = check_counter_warp(); - /* - * Everyone initialises count in the last loop: - */ - if (i == NR_LOOPS-1) - write_c0_count(initcount); + /* + * Store the maximum observed warp value for a potential retry: + */ + gbl_max_warp = max_warp; + + /* + * Ok, we are done: + */ + atomic_inc(&stop_count); + + /* + * Wait for the source CPU to print stuff: + */ + while (atomic_read(&stop_count) != cpus) + cpu_relax(); - atomic_inc(&count_count_stop); - while (atomic_read(&count_count_stop) != 2) - mb(); + /* + * Reset it for the next sync test: + */ + atomic_set(&stop_count, 0); + + /* + * Check the number of remaining test runs. If not zero, the test + * failed and a retry with adjusted counter is possible. If zero the + * test was either successful or failed terminally. + */ + if (!atomic_read(&test_runs)) { + /* Arrange for an interrupt in a short while */ + write_c0_compare(read_c0_count() + COUNTON); + return; } - /* Arrange for an interrupt in a short while */ - write_c0_compare(read_c0_count() + COUNTON); - local_irq_restore(flags); + /* + * If the warp value of this CPU is 0, then the other CPU + * observed time going backwards so this counter was ahead and + * needs to move backwards. 
+ */ + if (!cur_max_warp) + cur_max_warp = -gbl_max_warp; + + count = read_c0_count(); + count += cur_max_warp; + write_c0_count(count); + + pr_debug("Counter compensate: CPU%u observed %d warp\n", cpu, cur_max_warp); + + goto retry; + } -#undef NR_LOOPS diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index ae93a607ddf7..1bfc34a2e5b3 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -39,6 +39,7 @@ #include <asm/shmparam.h> #include <asm/sync.h> #include <asm/sysmips.h> +#include <asm/syscalls.h> #include <asm/switch_to.h> /* diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index e6b21de65cca..56f6f093bb88 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -5,7 +5,7 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm $(shell mkdir -p $(uapi) $(kapi)) syshdr := $(srctree)/scripts/syscallhdr.sh -sysnr := $(srctree)/$(src)/syscallnr.sh +sysnr := $(src)/syscallnr.sh systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 253ff994ed2e..d824ffe9a014 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -214,7 +214,7 @@ 203 n32 io_submit compat_sys_io_submit 204 n32 io_cancel sys_io_cancel 205 n32 exit_group sys_exit_group -206 n32 lookup_dcookie sys_lookup_dcookie +206 n32 lookup_dcookie sys_ni_syscall 207 n32 epoll_create sys_epoll_create 208 n32 epoll_ctl sys_epoll_ctl 209 n32 epoll_wait sys_epoll_wait @@ -354,7 +354,7 @@ 412 n32 utimensat_time64 sys_utimensat 413 n32 pselect6_time64 compat_sys_pselect6_time64 414 n32 ppoll_time64 compat_sys_ppoll_time64 -416 n32 io_pgetevents_time64 sys_io_pgetevents +416 n32 io_pgetevents_time64 compat_sys_io_pgetevents_time64 417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64 418 n32 mq_timedsend_time64 sys_mq_timedsend 419 n32 mq_timedreceive_time64 sys_mq_timedreceive @@ -389,3 +389,22 @@ 448 n32 process_mrelease sys_process_mrelease 449 n32 futex_waitv sys_futex_waitv 450 n32 set_mempolicy_home_node sys_set_mempolicy_home_node +451 n32 cachestat sys_cachestat +452 n32 fchmodat2 sys_fchmodat2 +453 n32 map_shadow_stack sys_map_shadow_stack +454 n32 futex_wake sys_futex_wake +455 n32 futex_wait sys_futex_wait +456 n32 futex_requeue sys_futex_requeue +457 n32 statmount sys_statmount +458 n32 listmount sys_listmount +459 n32 lsm_get_self_attr sys_lsm_get_self_attr +460 n32 lsm_set_self_attr sys_lsm_set_self_attr +461 n32 lsm_list_modules sys_lsm_list_modules +462 n32 mseal sys_mseal +463 n32 setxattrat sys_setxattrat +464 n32 getxattrat sys_getxattrat +465 n32 listxattrat sys_listxattrat +466 n32 removexattrat sys_removexattrat +467 n32 open_tree_attr sys_open_tree_attr +468 n32 file_getattr sys_file_getattr +469 n32 file_setattr sys_file_setattr diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index 3f1886ad9d80..7a7049c2c307 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -214,7 +214,7 @@ 203 n64 io_submit sys_io_submit 204 n64 io_cancel sys_io_cancel 205 n64 exit_group sys_exit_group -206 n64 lookup_dcookie sys_lookup_dcookie +206 n64 lookup_dcookie sys_ni_syscall 207 n64 epoll_create sys_epoll_create 208 n64 epoll_ctl sys_epoll_ctl 209 n64 epoll_wait sys_epoll_wait @@ -365,3 +365,22 @@ 448 n64 process_mrelease sys_process_mrelease 449 n64 futex_waitv 
sys_futex_waitv 450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 n64 cachestat sys_cachestat +452 n64 fchmodat2 sys_fchmodat2 +453 n64 map_shadow_stack sys_map_shadow_stack +454 n64 futex_wake sys_futex_wake +455 n64 futex_wait sys_futex_wait +456 n64 futex_requeue sys_futex_requeue +457 n64 statmount sys_statmount +458 n64 listmount sys_listmount +459 n64 lsm_get_self_attr sys_lsm_get_self_attr +460 n64 lsm_set_self_attr sys_lsm_set_self_attr +461 n64 lsm_list_modules sys_lsm_list_modules +462 n64 mseal sys_mseal +463 n64 setxattrat sys_setxattrat +464 n64 getxattrat sys_getxattrat +465 n64 listxattrat sys_listxattrat +466 n64 removexattrat sys_removexattrat +467 n64 open_tree_attr sys_open_tree_attr +468 n64 file_getattr sys_file_getattr +469 n64 file_setattr sys_file_setattr diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 8f243e35a7b2..d330274f0601 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -27,7 +27,7 @@ 17 o32 break sys_ni_syscall # 18 was sys_stat 18 o32 unused18 sys_ni_syscall -19 o32 lseek sys_lseek +19 o32 lseek sys_lseek compat_sys_lseek 20 o32 getpid sys_getpid 21 o32 mount sys_mount 22 o32 umount sys_oldumount @@ -258,7 +258,7 @@ 244 o32 io_submit sys_io_submit compat_sys_io_submit 245 o32 io_cancel sys_io_cancel 246 o32 exit_group sys_exit_group -247 o32 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie +247 o32 lookup_dcookie sys_ni_syscall 248 o32 epoll_create sys_epoll_create 249 o32 epoll_ctl sys_epoll_ctl 250 o32 epoll_wait sys_epoll_wait @@ -403,7 +403,7 @@ 412 o32 utimensat_time64 sys_utimensat sys_utimensat 413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 -416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +416 o32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64 417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend 419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive @@ -438,3 +438,22 @@ 448 o32 process_mrelease sys_process_mrelease 449 o32 futex_waitv sys_futex_waitv 450 o32 set_mempolicy_home_node sys_set_mempolicy_home_node +451 o32 cachestat sys_cachestat +452 o32 fchmodat2 sys_fchmodat2 +453 o32 map_shadow_stack sys_map_shadow_stack +454 o32 futex_wake sys_futex_wake +455 o32 futex_wait sys_futex_wait +456 o32 futex_requeue sys_futex_requeue +457 o32 statmount sys_statmount +458 o32 listmount sys_listmount +459 o32 lsm_get_self_attr sys_lsm_get_self_attr +460 o32 lsm_set_self_attr sys_lsm_set_self_attr +461 o32 lsm_list_modules sys_lsm_list_modules +462 o32 mseal sys_mseal +463 o32 setxattrat sys_setxattrat +464 o32 getxattrat sys_getxattrat +465 o32 listxattrat sys_listxattrat +466 o32 removexattrat sys_removexattrat +467 o32 open_tree_attr sys_open_tree_attr +468 o32 file_getattr sys_file_getattr +469 o32 file_setattr sys_file_setattr diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c index 9c1a2019113b..2e98049fe783 100644 --- a/arch/mips/kernel/sysrq.c +++ b/arch/mips/kernel/sysrq.c @@ -44,7 +44,7 @@ static void sysrq_tlbdump_othercpus(struct work_struct *dummy) static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus); #endif -static void sysrq_handle_tlbdump(int key) +static void sysrq_handle_tlbdump(u8 key) { sysrq_tlbdump_single(NULL); #ifdef CONFIG_SMP diff --git a/arch/mips/kernel/traps.c 
b/arch/mips/kernel/traps.c index 246c6a6b0261..8ec1e185b35c 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -38,6 +38,7 @@ #include <linux/kdb.h> #include <linux/irq.h> #include <linux/perf_event.h> +#include <linux/string_choices.h> #include <asm/addrspace.h> #include <asm/bootinfo.h> @@ -58,6 +59,7 @@ #include <asm/module.h> #include <asm/msa.h> #include <asm/ptrace.h> +#include <asm/regdef.h> #include <asm/sections.h> #include <asm/siginfo.h> #include <asm/tlbdebug.h> @@ -75,7 +77,7 @@ #include "access-helper.h" extern void check_wait(void); -extern asmlinkage void rollback_handle_int(void); +extern asmlinkage void skipover_handle_int(void); extern asmlinkage void handle_int(void); extern asmlinkage void handle_adel(void); extern asmlinkage void handle_ades(void); @@ -1704,10 +1706,10 @@ static inline __init void parity_protection_init(void) l2parity &= l1parity; /* Probe L1 ECC support */ - cp0_ectl = read_c0_ecc(); - write_c0_ecc(cp0_ectl | ERRCTL_PE); + cp0_ectl = read_c0_errctl(); + write_c0_errctl(cp0_ectl | ERRCTL_PE); back_to_back_c0_hazard(); - cp0_ectl = read_c0_ecc(); + cp0_ectl = read_c0_errctl(); /* Probe L2 ECC support */ gcr_ectl = read_gcr_err_control(); @@ -1726,9 +1728,9 @@ static inline __init void parity_protection_init(void) cp0_ectl |= ERRCTL_PE; else cp0_ectl &= ~ERRCTL_PE; - write_c0_ecc(cp0_ectl); + write_c0_errctl(cp0_ectl); back_to_back_c0_hazard(); - WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity); + WARN_ON(!!(read_c0_errctl() & ERRCTL_PE) != l1parity); /* Configure L2 ECC checking */ if (l2parity) @@ -1740,8 +1742,8 @@ static inline __init void parity_protection_init(void) gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN; WARN_ON(!!gcr_ectl != l2parity); - pr_info("Cache parity protection %sabled\n", - l1parity ? "en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(l1parity)); return; } @@ -1760,18 +1762,18 @@ static inline __init void parity_protection_init(void) unsigned long errctl; unsigned int l1parity_present, l2parity_present; - errctl = read_c0_ecc(); + errctl = read_c0_errctl(); errctl &= ~(ERRCTL_PE|ERRCTL_L2P); /* probe L1 parity support */ - write_c0_ecc(errctl | ERRCTL_PE); + write_c0_errctl(errctl | ERRCTL_PE); back_to_back_c0_hazard(); - l1parity_present = (read_c0_ecc() & ERRCTL_PE); + l1parity_present = (read_c0_errctl() & ERRCTL_PE); /* probe L2 parity support */ - write_c0_ecc(errctl|ERRCTL_L2P); + write_c0_errctl(errctl|ERRCTL_L2P); back_to_back_c0_hazard(); - l2parity_present = (read_c0_ecc() & ERRCTL_L2P); + l2parity_present = (read_c0_errctl() & ERRCTL_L2P); if (l1parity_present && l2parity_present) { if (l1parity) @@ -1790,20 +1792,20 @@ static inline __init void parity_protection_init(void) printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); - write_c0_ecc(errctl); + write_c0_errctl(errctl); back_to_back_c0_hazard(); - errctl = read_c0_ecc(); + errctl = read_c0_errctl(); printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); if (l1parity_present) - printk(KERN_INFO "Cache parity protection %sabled\n", - (errctl & ERRCTL_PE) ? "en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(errctl & ERRCTL_PE)); if (l2parity_present) { if (l1parity_present && l1parity) errctl ^= ERRCTL_L2P; - printk(KERN_INFO "L2 cache parity protection %sabled\n", - (errctl & ERRCTL_L2P) ? 
"en" : "dis"); + pr_info("L2 cache parity protection %s\n", + str_enabled_disabled(errctl & ERRCTL_L2P)); } } break; @@ -1811,11 +1813,11 @@ static inline __init void parity_protection_init(void) case CPU_5KC: case CPU_5KE: case CPU_LOONGSON32: - write_c0_ecc(0x80000000); + write_c0_errctl(0x80000000); back_to_back_c0_hazard(); /* Set the PE bit (bit 31) in the c0_errctl register. */ - printk(KERN_INFO "Cache parity protection %sabled\n", - (read_c0_ecc() & 0x80000000) ? "en" : "dis"); + pr_info("Cache parity protection %s\n", + str_enabled_disabled(read_c0_errctl() & 0x80000000)); break; case CPU_20KC: case CPU_25KF: @@ -1886,8 +1888,8 @@ asmlinkage void do_ftlb(void) if ((cpu_has_mips_r2_r6) && (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) || ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) { - pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", - read_c0_ecc()); + pr_err("FTLB error exception, cp0_errctl=0x%08x:\n", + read_c0_errctl()); pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); reg_val = read_c0_cacheerr(); pr_err("c0_cacheerr == %08x\n", reg_val); @@ -2007,7 +2009,13 @@ unsigned long vi_handlers[64]; void reserve_exception_space(phys_addr_t addr, unsigned long size) { - memblock_reserve(addr, size); + /* + * reserve exception space on CPUs other than CPU0 + * is too late, since memblock is unavailable when APs + * up + */ + if (smp_processor_id() == 0) + memblock_reserve(addr, size); } void __init *set_except_vector(int n, void *addr) @@ -2035,13 +2043,12 @@ void __init *set_except_vector(int n, void *addr) unsigned long jump_mask = ~((1 << 28) - 1); #endif u32 *buf = (u32 *)(ebase + 0x200); - unsigned int k0 = 26; if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { uasm_i_j(&buf, handler & ~jump_mask); uasm_i_nop(&buf); } else { - UASM_i_LA(&buf, k0, handler); - uasm_i_jr(&buf, k0); + UASM_i_LA(&buf, GPR_K0, handler); + uasm_i_jr(&buf, GPR_K0); uasm_i_nop(&buf); } local_flush_icache_range(ebase + 0x200, (unsigned long)buf); @@ -2055,110 +2062,71 @@ static void do_default_vi(void) panic("Caught unexpected vectored interrupt."); } -static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) +void *set_vi_handler(int n, vi_handler_t addr) { + extern const u8 except_vec_vi[]; + extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; + extern const u8 skipover_except_vec_vi[]; unsigned long handler; unsigned long old_handler = vi_handlers[n]; int srssets = current_cpu_data.srsets; u16 *h; unsigned char *b; + const u8 *vec_start; + int ori_offset; + int handler_len; BUG_ON(!cpu_has_veic && !cpu_has_vint); if (addr == NULL) { handler = (unsigned long) do_default_vi; - srs = 0; } else handler = (unsigned long) addr; vi_handlers[n] = handler; b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); - if (srs >= srssets) - panic("Shadow register set %d not supported", srs); - if (cpu_has_veic) { if (board_bind_eic_interrupt) - board_bind_eic_interrupt(n, srs); + board_bind_eic_interrupt(n, 0); } else if (cpu_has_vint) { /* SRSMap is only defined if shadow sets are implemented */ if (srssets > 1) - change_c0_srsmap(0xf << n*4, srs << n*4); + change_c0_srsmap(0xf << n*4, 0 << n*4); } - if (srs == 0) { - /* - * If no shadow set is selected then use the default handler - * that does normal register saving and standard interrupt exit - */ - extern const u8 except_vec_vi[], except_vec_vi_lui[]; - extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; - extern const u8 rollback_except_vec_vi[]; - const u8 *vec_start = 
using_rollback_handler() ? - rollback_except_vec_vi : except_vec_vi; + vec_start = using_skipover_handler() ? skipover_except_vec_vi : + except_vec_vi; #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) - const int lui_offset = except_vec_vi_lui - vec_start + 2; - const int ori_offset = except_vec_vi_ori - vec_start + 2; + ori_offset = except_vec_vi_ori - vec_start + 2; #else - const int lui_offset = except_vec_vi_lui - vec_start; - const int ori_offset = except_vec_vi_ori - vec_start; + ori_offset = except_vec_vi_ori - vec_start; #endif - const int handler_len = except_vec_vi_end - vec_start; + handler_len = except_vec_vi_end - vec_start; - if (handler_len > VECTORSPACING) { - /* - * Sigh... panicing won't help as the console - * is probably not configured :( - */ - panic("VECTORSPACING too small"); - } - - set_handler(((unsigned long)b - ebase), vec_start, -#ifdef CONFIG_CPU_MICROMIPS - (handler_len - 1)); -#else - handler_len); -#endif - h = (u16 *)(b + lui_offset); - *h = (handler >> 16) & 0xffff; - h = (u16 *)(b + ori_offset); - *h = (handler & 0xffff); - local_flush_icache_range((unsigned long)b, - (unsigned long)(b+handler_len)); - } - else { + if (handler_len > VECTORSPACING) { /* - * In other cases jump directly to the interrupt handler. It - * is the handler's responsibility to save registers if required - * (eg hi/lo) and return from the exception using "eret". + * Sigh... panicing won't help as the console + * is probably not configured :( */ - u32 insn; + panic("VECTORSPACING too small"); + } - h = (u16 *)b; - /* j handler */ + set_handler(((unsigned long)b - ebase), vec_start, #ifdef CONFIG_CPU_MICROMIPS - insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); + (handler_len - 1)); #else - insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); + handler_len); #endif - h[0] = (insn >> 16) & 0xffff; - h[1] = insn & 0xffff; - h[2] = 0; - h[3] = 0; - local_flush_icache_range((unsigned long)b, - (unsigned long)(b+8)); - } + /* insert offset into vi_handlers[] */ + h = (u16 *)(b + ori_offset); + *h = n * sizeof(handler); + local_flush_icache_range((unsigned long)b, + (unsigned long)(b+handler_len)); return (void *)old_handler; } -void *set_vi_handler(int n, vi_handler_t addr) -{ - return set_vi_srs_handler(n, addr, 0); -} - -extern void tlb_init(void); - /* * Timer interrupt */ @@ -2332,7 +2300,7 @@ static const char panic_null_cerr[] = void set_uncached_handler(unsigned long offset, void *addr, unsigned long size) { - unsigned long uncached_ebase = CKSEG1ADDR(ebase); + unsigned long uncached_ebase = CKSEG1ADDR_OR_64BIT(__pa(ebase)); if (!addr) panic(panic_null_cerr); @@ -2384,10 +2352,13 @@ void __init trap_init(void) * EVA is special though as it allows segments to be rearranged * and to become uncached during cache error handling. */ - if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000)) + if (!IS_ENABLED(CONFIG_EVA) && ebase_pa < 0x20000000) ebase = CKSEG0ADDR(ebase_pa); else ebase = (unsigned long)phys_to_virt(ebase_pa); + if (ebase_pa >= 0x20000000) + pr_warn("ebase(%pa) should better be in KSeg0", + &ebase_pa); } if (cpu_has_mmips) { @@ -2418,7 +2389,7 @@ void __init trap_init(void) set_except_vector(i, handle_reserved); /* - * Copy the EJTAG debug exception vector handler code to it's final + * Copy the EJTAG debug exception vector handler code to its final * destination. 
*/ if (cpu_has_ejtag && board_ejtag_handler_setup) @@ -2455,8 +2426,8 @@ void __init trap_init(void) if (board_be_init) board_be_init(); - set_except_vector(EXCCODE_INT, using_rollback_handler() ? - rollback_handle_int : handle_int); + set_except_vector(EXCCODE_INT, using_skipover_handler() ? + skipover_handle_int : handle_int); set_except_vector(EXCCODE_MOD, handle_tlbm); set_except_vector(EXCCODE_TLBL, handle_tlbl); set_except_vector(EXCCODE_TLBS, handle_tlbs); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 7b5aba5df02e..db652c99b72e 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -91,6 +91,7 @@ #include <asm/inst.h> #include <asm/unaligned-emul.h> #include <asm/mmu_context.h> +#include <asm/traps.h> #include <linux/uaccess.h> #include "access-helper.h" @@ -160,6 +161,47 @@ static void emulate_load_store_insn(struct pt_regs *regs, * The remaining opcodes are the ones that are really of * interest. */ +#ifdef CONFIG_MACH_INGENIC + case spec2_op: + if (insn.mxu_lx_format.func != mxu_lx_op) + goto sigbus; /* other MXU instructions we don't care */ + + switch (insn.mxu_lx_format.op) { + case mxu_lxw_op: + if (user && !access_ok(addr, 4)) + goto sigbus; + LoadW(addr, value, res); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.mxu_lx_format.rd] = value; + break; + case mxu_lxh_op: + if (user && !access_ok(addr, 2)) + goto sigbus; + LoadHW(addr, value, res); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.dsp_format.rd] = value; + break; + case mxu_lxhu_op: + if (user && !access_ok(addr, 2)) + goto sigbus; + LoadHWU(addr, value, res); + if (res) + goto fault; + compute_return_epc(regs); + regs->regs[insn.dsp_format.rd] = value; + break; + case mxu_lxb_op: + case mxu_lxbu_op: + goto sigbus; + default: + goto sigill; + } + break; +#endif case spec3_op: if (insn.dsp_format.func == lx_op) { switch (insn.dsp_format.op) { diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c index 6c063aa188e6..401b148f8917 100644 --- a/arch/mips/kernel/uprobes.c +++ b/arch/mips/kernel/uprobes.c @@ -191,6 +191,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *aup, { struct uprobe_task *utask = current->utask; + current->thread.trap_nr = utask->autask.saved_trap_nr; instruction_pointer_set(regs, utask->vaddr); } @@ -207,24 +208,6 @@ unsigned long arch_uretprobe_hijack_return_addr( return ra; } -/** - * set_swbp - store breakpoint at a given address. - * @auprobe: arch specific probepoint information. - * @mm: the probed process address space. - * @vaddr: the virtual address to insert the opcode. - * - * For mm @mm, store the breakpoint instruction at @vaddr. - * Return 0 (success) or a negative errno. - * - * This version overrides the weak version in kernel/events/uprobes.c. - * It is required to handle MIPS16 and microMIPS. 
- */ -int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, - unsigned long vaddr) -{ - return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN); -} - void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len) { diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 3d0cf471f2fe..de096777172f 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -11,10 +11,11 @@ #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/mman.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/timekeeper_internal.h> +#include <linux/vdso_datastore.h> #include <asm/abi.h> #include <asm/mips-cps.h> @@ -23,20 +24,7 @@ #include <vdso/helpers.h> #include <vdso/vsyscall.h> -/* Kernel-provided data used by the VDSO. */ -static union mips_vdso_data mips_vdso_data __page_aligned_data; -struct vdso_data *vdso_data = mips_vdso_data.data; - -/* - * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as - * what we map and where within the area they are mapped is determined at - * runtime. - */ -static struct page *no_pages[] = { NULL }; -static struct vm_special_mapping vdso_vvar_mapping = { - .name = "[vvar]", - .pages = no_pages, -}; +static_assert(VDSO_NR_PAGES == __VDSO_PAGES); static void __init init_vdso_image(struct mips_vdso_image *image) { @@ -79,7 +67,7 @@ static unsigned long vdso_base(void) } if (current->flags & PF_RANDOMIZE) { - base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1); + base += get_random_u32_below(VDSO_RANDOMIZE_SIZE); base = PAGE_ALIGN(base); } @@ -90,7 +78,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mips_vdso_image *image = current->thread.abi->vdso; struct mm_struct *mm = current->mm; - unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base; + unsigned long gic_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base; struct vm_area_struct *vma; int ret; @@ -98,11 +86,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -EINTR; if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) { + unsigned long unused; + /* Map delay slot emulation page */ - base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, - VM_READ | VM_EXEC | - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - 0, NULL); + base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0, &unused, + NULL); if (IS_ERR_VALUE(base)) { ret = base; goto out; @@ -118,8 +107,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) * the counter registers at the start. */ gic_size = mips_gic_present() ? 
PAGE_SIZE : 0; - vvar_size = gic_size + PAGE_SIZE; - size = vvar_size + image->size; + size = gic_size + VDSO_NR_PAGES * PAGE_SIZE + image->size; /* * Find a region that's large enough for us to perform the @@ -142,15 +130,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) */ if (cpu_has_dc_aliases) { base = __ALIGN_MASK(base, shm_align_mask); - base += ((unsigned long)vdso_data - gic_size) & shm_align_mask; + base += ((unsigned long)vdso_k_time_data - gic_size) & shm_align_mask; } data_addr = base + gic_size; - vdso_addr = data_addr + PAGE_SIZE; + vdso_addr = data_addr + VDSO_NR_PAGES * PAGE_SIZE; - vma = _install_special_mapping(mm, base, vvar_size, - VM_READ | VM_MAYREAD, - &vdso_vvar_mapping); + vma = vdso_install_vvar_mapping(mm, data_addr); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; @@ -159,7 +145,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map GIC user page. */ if (gic_size) { gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS; - gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT; + gic_pfn = PFN_DOWN(__pa(gic_base)); + static const struct vm_special_mapping gic_mapping = { + .name = "[gic]", + .pages = (struct page **) { NULL }, + }; + + vma = _install_special_mapping(mm, base, gic_size, VM_READ | VM_MAYREAD, + &gic_mapping); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto out; + } ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, pgprot_noncached(vma->vm_page_prot)); @@ -167,13 +164,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) goto out; } - /* Map data page. */ - ret = remap_pfn_range(vma, data_addr, - virt_to_phys(vdso_data) >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot); - if (ret) - goto out; - /* Map VDSO image. */ vma = _install_special_mapping(mm, vdso_addr, image->size, VM_READ | VM_EXEC | diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 1f98947fe715..2b708fac8d2c 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -15,6 +15,8 @@ #define EMITS_PT_NOTE #endif +#define RUNTIME_DISCARD_EXIT + #include <asm-generic/vmlinux.lds.h> #undef mips @@ -59,9 +61,9 @@ SECTIONS /* read-only */ _text = .; /* Text and read-only data */ .text : { + HEAD_TEXT TEXT_TEXT SCHED_TEXT - CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c deleted file mode 100644 index e673603e11e5..000000000000 --- a/arch/mips/kernel/vpe-cmp.c +++ /dev/null @@ -1,180 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. - * Copyright (C) 2013 Imagination Technologies Ltd. 
- */ -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/fs.h> -#include <linux/slab.h> -#include <linux/export.h> - -#include <asm/vpe.h> - -static int major; - -void cleanup_tc(struct tc *tc) -{ - -} - -static ssize_t store_kill(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) -{ - struct vpe *vpe = get_vpe(aprp_cpu_index()); - struct vpe_notifications *notifier; - - list_for_each_entry(notifier, &vpe->notify, list) - notifier->stop(aprp_cpu_index()); - - release_progmem(vpe->load_addr); - vpe->state = VPE_STATE_UNUSED; - - return len; -} -static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); - -static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, - char *buf) -{ - struct vpe *vpe = get_vpe(aprp_cpu_index()); - - return sprintf(buf, "%d\n", vpe->ntcs); -} - -static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) -{ - struct vpe *vpe = get_vpe(aprp_cpu_index()); - unsigned long new; - int ret; - - ret = kstrtoul(buf, 0, &new); - if (ret < 0) - return ret; - - /* APRP can only reserve one TC in a VPE and no more. */ - if (new != 1) - return -EINVAL; - - vpe->ntcs = new; - - return len; -} -static DEVICE_ATTR_RW(ntcs); - -static struct attribute *vpe_attrs[] = { - &dev_attr_kill.attr, - &dev_attr_ntcs.attr, - NULL, -}; -ATTRIBUTE_GROUPS(vpe); - -static void vpe_device_release(struct device *cd) -{ - kfree(cd); -} - -static struct class vpe_class = { - .name = "vpe", - .owner = THIS_MODULE, - .dev_release = vpe_device_release, - .dev_groups = vpe_groups, -}; - -static struct device vpe_device; - -int __init vpe_module_init(void) -{ - struct vpe *v = NULL; - struct tc *t; - int err; - - if (!cpu_has_mipsmt) { - pr_warn("VPE loader: not a MIPS MT capable processor\n"); - return -ENODEV; - } - - if (num_possible_cpus() - aprp_cpu_index() < 1) { - pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n" - "Pass maxcpus=<n> argument as kernel argument\n"); - return -ENODEV; - } - - major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops); - if (major < 0) { - pr_warn("VPE loader: unable to register character device\n"); - return major; - } - - err = class_register(&vpe_class); - if (err) { - pr_err("vpe_class registration failed\n"); - goto out_chrdev; - } - - device_initialize(&vpe_device); - vpe_device.class = &vpe_class; - vpe_device.parent = NULL; - dev_set_name(&vpe_device, "vpe_sp"); - vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR); - err = device_add(&vpe_device); - if (err) { - pr_err("Adding vpe_device failed\n"); - goto out_class; - } - - t = alloc_tc(aprp_cpu_index()); - if (!t) { - pr_warn("VPE: unable to allocate TC\n"); - err = -ENOMEM; - goto out_dev; - } - - /* VPE */ - v = alloc_vpe(aprp_cpu_index()); - if (v == NULL) { - pr_warn("VPE: unable to allocate VPE\n"); - kfree(t); - err = -ENOMEM; - goto out_dev; - } - - v->ntcs = 1; - - /* add the tc to the list of this vpe's tc's. 
*/ - list_add(&t->tc, &v->tc); - - /* TC */ - t->pvpe = v; /* set the parent vpe */ - - return 0; - -out_dev: - device_del(&vpe_device); - -out_class: - class_unregister(&vpe_class); - -out_chrdev: - unregister_chrdev(major, VPE_MODULE_NAME); - - return err; -} - -void __exit vpe_module_exit(void) -{ - struct vpe *v, *n; - - device_del(&vpe_device); - class_unregister(&vpe_class); - unregister_chrdev(major, VPE_MODULE_NAME); - - /* No locking needed here */ - list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) - if (v->state != VPE_STATE_UNUSED) - release_vpe(v); -} diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index bad6b0891b2b..84124ac2d2a5 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c @@ -92,12 +92,11 @@ int vpe_run(struct vpe *v) write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); /* - * The sde-kit passes 'memsize' to __start in $a3, so set something - * here... Or set $a3 to zero and define DFLT_STACK_SIZE and - * DFLT_HEAP_SIZE when you compile your program + * We don't pass the memsize here, so VPE programs need to be + * compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined. */ - mttgpr(6, v->ntcs); - mttgpr(7, physical_memsize); + mttgpr($7, 0); + mttgpr($6, v->ntcs); /* set up VPE1 */ /* @@ -313,12 +312,10 @@ ATTRIBUTE_GROUPS(vpe); static void vpe_device_release(struct device *cd) { - kfree(cd); } static struct class vpe_class = { .name = "vpe", - .owner = THIS_MODULE, .dev_release = vpe_device_release, .dev_groups = vpe_groups, }; @@ -497,6 +494,7 @@ out_dev: device_del(&vpe_device); out_class: + put_device(&vpe_device); class_unregister(&vpe_class); out_chrdev: @@ -509,7 +507,7 @@ void __exit vpe_module_exit(void) { struct vpe *v, *n; - device_del(&vpe_device); + device_unregister(&vpe_device); class_unregister(&vpe_class); unregister_chrdev(major, VPE_MODULE_NAME); diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 13294972707b..2b67c44adab9 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -6,9 +6,9 @@ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2013 Imagination Technologies Ltd. * - * VPE spport module for loading a MIPS SP program into VPE1. The SP + * VPE support module for loading a MIPS SP program into VPE1. The SP * environment is rather simple since there are no TLBs. It needs - * to be relocatable (or partiall linked). Initialize your stack in + * to be relocatable (or partially linked). Initialize your stack in * the startup-code. The loader looks for the symbol __start and sets * up the execution to resume from there. To load and run, simply do * a cat SP 'binary' to the /dev/vpe1 device. 
@@ -22,6 +22,7 @@ #include <linux/vmalloc.h> #include <linux/elf.h> #include <linux/seq_file.h> +#include <linux/string.h> #include <linux/syscalls.h> #include <linux/moduleloader.h> #include <linux/interrupt.h> @@ -199,18 +200,17 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr, for (m = 0; m < ARRAY_SIZE(masks); ++m) { for (i = 0; i < hdr->e_shnum; ++i) { Elf_Shdr *s = &sechdrs[i]; + struct module_memory *mod_mem; + + mod_mem = &mod->mem[MOD_TEXT]; if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL) continue; s->sh_entsize = - get_offset((unsigned long *)&mod->core_layout.size, s); + get_offset((unsigned long *)&mod_mem->size, s); } - - if (m == 0) - mod->core_layout.text_size = mod->core_layout.size; - } } @@ -583,7 +583,7 @@ static int vpe_elfload(struct vpe *v) struct module mod; /* so we can re-use the relocations code */ memset(&mod, 0, sizeof(struct module)); - strcpy(mod.name, "VPE loader"); + strscpy(mod.name, "VPE loader"); hdr = (Elf_Ehdr *) v->pbuffer; len = v->plen; @@ -641,7 +641,7 @@ static int vpe_elfload(struct vpe *v) layout_sections(&mod, hdr, sechdrs, secstrings); } - v->load_addr = alloc_progmem(mod.core_layout.size); + v->load_addr = alloc_progmem(mod.mem[MOD_TEXT].size); if (!v->load_addr) return -ENOMEM; @@ -795,7 +795,7 @@ static int vpe_open(struct inode *inode, struct file *filp) static int vpe_release(struct inode *inode, struct file *filp) { -#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP) +#ifdef CONFIG_MIPS_VPE_LOADER_MT struct vpe *v; Elf_Ehdr *hdr; int ret = 0; |
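The smp-cps.c changes above replace the single global core_power bitmap with per-cluster boot configuration, so every lookup is now keyed by (cluster, core) rather than by core alone. The following standalone C sketch only illustrates that indexing pattern; the array sizes, the bool-array stand-in for the kernel's bitmap, and the helper names are hypothetical and are not the kernel's actual definitions.

/*
 * Standalone sketch of per-cluster boot bookkeeping: one power table and
 * one per-core config array per cluster, indexed as (cluster, core).
 * Sizes and helpers are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CLUSTERS 4
#define MAX_CORES    8

struct core_boot_config { unsigned long vpe_mask; };

struct cluster_boot_config {
	bool core_power[MAX_CORES];                 /* stand-in for a kernel bitmap */
	struct core_boot_config core_config[MAX_CORES];
};

static struct cluster_boot_config cluster_bootcfg[MAX_CLUSTERS];

/* Analogue of bitmap_set(cluster_cfg->core_power, core, 1) */
static void mark_core_powered(unsigned int cluster, unsigned int core)
{
	cluster_bootcfg[cluster].core_power[core] = true;
}

/* Analogue of test_bit(core, cluster_cfg->core_power) */
static bool core_is_powered(unsigned int cluster, unsigned int core)
{
	return cluster_bootcfg[cluster].core_power[core];
}

int main(void)
{
	mark_core_powered(1, 3);
	printf("cluster 1 core 3 powered: %d\n", core_is_powered(1, 3));
	printf("cluster 0 core 3 powered: %d\n", core_is_powered(0, 3));
	return 0;
}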
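The sync-r4k.c rewrite above detects counter warps by having both CPUs publish their latest counter reading under a shared lock and flagging any fresh reading that falls behind the previously published one. Below is a minimal user-space analogue of that warp check, assuming CLOCK_MONOTONIC as the counter source and pthreads for the two participants; it illustrates the pattern only and is not the kernel's read_c0_count()/mips_hpt_frequency implementation.

/*
 * Two threads share a lock-protected "last published counter value".
 * Each thread reads the counter under the lock, compares it with the
 * previously published value, and records the largest backwards step
 * (the "warp") it ever observes.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t last_counter;
static uint64_t max_warp;

static uint64_t read_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void *check_warp(void *arg)
{
	uint64_t prev, now;
	int i;

	for (i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&sync_lock);
		prev = last_counter;            /* last value published by either thread */
		now = read_counter();
		last_counter = now;
		if (prev > now && prev - now > max_warp)
			max_warp = prev - now;  /* counter appeared to go backwards */
		pthread_mutex_unlock(&sync_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, check_warp, NULL);
	pthread_create(&b, NULL, check_warp, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("max observed warp: %llu ns\n", (unsigned long long)max_warp);
	return 0;
}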