author    Ralf Baechle <ralf@linux-mips.org>    2014-05-28 19:00:14 +0200
committer Ralf Baechle <ralf@linux-mips.org>    2014-05-29 15:08:23 +0200
commit    2e2d663d2dd64ffe9855be0b35aa221c9b8139f2 (patch)
tree      508667aa6fbab564e7875d3953671265d0176e69 /arch/mips/kernel
parent    MIPS: SNI: Remove USE_GENERIC_EARLY_PRINTK_8250 (diff)
parent    MIPS: Malta: CPS SMP by default (diff)
Merge branch 'wip-mips-pm' of https://github.com/paulburton/linux into mips-for-linux-next
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/Makefile          3
-rw-r--r--   arch/mips/kernel/asm-offsets.c    29
-rw-r--r--   arch/mips/kernel/cevt-gic.c        5
-rw-r--r--   arch/mips/kernel/cevt-r4k.c       10
-rw-r--r--   arch/mips/kernel/cps-vec.S       328
-rw-r--r--   arch/mips/kernel/idle.c           11
-rw-r--r--   arch/mips/kernel/irq-gic.c        15
-rw-r--r--   arch/mips/kernel/mips-cpc.c       28
-rw-r--r--   arch/mips/kernel/pm-cps.c        716
-rw-r--r--   arch/mips/kernel/pm.c             99
-rw-r--r--   arch/mips/kernel/smp-cps.c       432
-rw-r--r--   arch/mips/kernel/smp-gic.c        11
-rw-r--r--   arch/mips/kernel/smp.c            47
-rw-r--r--   arch/mips/kernel/traps.c          57
14 files changed, 1608 insertions, 183 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 8f8b531bc848..a61d108f4c0e 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -105,6 +105,9 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_MIPS_CM) += mips-cm.o
obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
+obj-$(CONFIG_CPU_PM) += pm.o
+obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
+
#
# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 08f897ee9a77..02f075df8f2e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
+#include <asm/pm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/smp-cps.h>
@@ -401,6 +402,20 @@ void output_pbe_defines(void)
}
#endif
+#ifdef CONFIG_CPU_PM
+void output_pm_defines(void)
+{
+ COMMENT(" PM offsets. ");
+#ifdef CONFIG_EVA
+ OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
+ OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
+ OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
+#endif
+ OFFSET(SSS_SP, mips_static_suspend_state, sp);
+ BLANK();
+}
+#endif
+
void output_kvm_defines(void)
{
COMMENT(" KVM/MIPS Specfic offsets. ");
@@ -469,10 +484,14 @@ void output_kvm_defines(void)
void output_cps_defines(void)
{
COMMENT(" MIPS CPS offsets. ");
- OFFSET(BOOTCFG_CORE, boot_config, core);
- OFFSET(BOOTCFG_VPE, boot_config, vpe);
- OFFSET(BOOTCFG_PC, boot_config, pc);
- OFFSET(BOOTCFG_SP, boot_config, sp);
- OFFSET(BOOTCFG_GP, boot_config, gp);
+
+ OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
+ OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
+ DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
+
+ OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
+ OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
+ OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
+ DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
}
#endif
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
index 594cbbf16d62..6093716980b9 100644
--- a/arch/mips/kernel/cevt-gic.c
+++ b/arch/mips/kernel/cevt-gic.c
@@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
cnt = gic_read_count();
cnt += (u64)delta;
- gic_write_compare(cnt);
+ gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
@@ -73,7 +73,8 @@ int gic_clockevent_init(void)
cd = &per_cpu(gic_clockevent_device, cpu);
cd->name = "MIPS GIC";
- cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP;
clockevent_set_clock(cd, gic_frequency);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index bff124ae69fa..bc127e22fdab 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -62,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
-#ifdef CONFIG_CEVT_GIC
- if (!gic_present)
-#endif
cd->event_handler(cd);
}
@@ -182,7 +179,9 @@ int r4k_clockevent_init(void)
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
- cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
clockevent_set_clock(cd, mips_hpt_frequency);
@@ -197,9 +196,6 @@ int r4k_clockevent_init(void)
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
-#ifdef CONFIG_CEVT_GIC
- if (!gic_present)
-#endif
clockevents_register_device(cd);
if (cp0_timer_irq_installed)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index f7a46db4b161..6f4f739dad96 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -14,19 +14,43 @@
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
-#define GCR_CL_COHERENCE_OFS 0x2008
+#define GCR_CL_COHERENCE_OFS 0x2008
+#define GCR_CL_ID_OFS 0x2028
+
+.extern mips_cm_base
+
+.set noreorder
+
+ /*
+ * Set dest to non-zero if the core supports the MT ASE, else zero. If
+ * MT is not supported then branch to nomt.
+ */
+ .macro has_mt dest, nomt
+ mfc0 \dest, CP0_CONFIG
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 1
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 2
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 3
+ andi \dest, \dest, MIPS_CONF3_MT
+ beqz \dest, \nomt
+ .endm
.section .text.cps-vec
.balign 0x1000
-.set noreorder
LEAF(mips_cps_core_entry)
/*
- * These first 8 bytes will be patched by cps_smp_setup to load the
- * base address of the CM GCRs into register v1.
+ * These first 12 bytes will be patched by cps_smp_setup to load the
+ * base address of the CM GCRs into register v1 and the CCA to use into
+ * register s0.
*/
.quad 0
+ .word 0
/* Check whether we're here due to an NMI */
mfc0 k0, CP0_STATUS
@@ -117,10 +141,11 @@ icache_done:
add a0, a0, t0
dcache_done:
- /* Set Kseg0 cacheable, coherent, write-back, write-allocate */
+ /* Set Kseg0 CCA to that in s0 */
mfc0 t0, CP0_CONFIG
ori t0, 0x7
- xori t0, 0x2
+ xori t0, 0x7
+ or t0, t0, s0
mtc0 t0, CP0_CONFIG
ehb
@@ -134,21 +159,24 @@ dcache_done:
jr t0
nop
-1: /* We're up, cached & coherent */
+ /*
+ * We're up, cached & coherent. Perform any further required core-level
+ * initialisation.
+ */
+1: jal mips_cps_core_init
+ nop
/*
- * TODO: We should check the VPE number we intended to boot here, and
- * if non-zero we should start that VPE and stop this one. For
- * the moment this doesn't matter since CPUs are brought up
- * sequentially and in order, but once hotplug is implemented
- * this will need revisiting.
+ * Boot any other VPEs within this core that should be online, and
+ * deactivate this VPE if it should be offline.
*/
+ jal mips_cps_boot_vpes
+ nop
/* Off we go! */
- la t0, mips_cps_bootcfg
- lw t1, BOOTCFG_PC(t0)
- lw gp, BOOTCFG_GP(t0)
- lw sp, BOOTCFG_SP(t0)
+ lw t1, VPEBOOTCFG_PC(v0)
+ lw gp, VPEBOOTCFG_GP(v0)
+ lw sp, VPEBOOTCFG_SP(v0)
jr t1
nop
END(mips_cps_core_entry)
@@ -189,3 +217,271 @@ LEAF(excep_ejtag)
jr k0
nop
END(excep_ejtag)
+
+LEAF(mips_cps_core_init)
+#ifdef CONFIG_MIPS_MT
+ /* Check that the core implements the MT ASE */
+ has_mt t0, 3f
+ nop
+
+ .set push
+ .set mt
+
+ /* Only allow 1 TC per VPE to execute... */
+ dmt
+
+ /* ...and for the moment only 1 VPE */
+ dvpe
+ la t1, 1f
+ jr.hb t1
+ nop
+
+ /* Enter VPE configuration state */
+1: mfc0 t0, CP0_MVPCONTROL
+ ori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+ /* Retrieve the number of VPEs within the core */
+ mfc0 t0, CP0_MVPCONF0
+ srl t0, t0, MVPCONF0_PVPE_SHIFT
+ andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+ addi t7, t0, 1
+
+ /* If there's only 1, we're done */
+ beqz t0, 2f
+ nop
+
+ /* Loop through each VPE within this core */
+ li t5, 1
+
+1: /* Operate on the appropriate TC */
+ mtc0 t5, CP0_VPECONTROL
+ ehb
+
+ /* Bind TC to VPE (1:1 TC:VPE mapping) */
+ mttc0 t5, CP0_TCBIND
+
+ /* Set exclusive TC, non-active, master */
+ li t0, VPECONF0_MVP
+ sll t1, t5, VPECONF0_XTC_SHIFT
+ or t0, t0, t1
+ mttc0 t0, CP0_VPECONF0
+
+ /* Set TC non-active, non-allocatable */
+ mttc0 zero, CP0_TCSTATUS
+
+ /* Set TC halted */
+ li t0, TCHALT_H
+ mttc0 t0, CP0_TCHALT
+
+ /* Next VPE */
+ addi t5, t5, 1
+ slt t0, t5, t7
+ bnez t0, 1b
+ nop
+
+ /* Leave VPE configuration state */
+2: mfc0 t0, CP0_MVPCONTROL
+ xori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+3: .set pop
+#endif
+ jr ra
+ nop
+ END(mips_cps_core_init)
+
+LEAF(mips_cps_boot_vpes)
+ /* Retrieve CM base address */
+ la t0, mips_cm_base
+ lw t0, 0(t0)
+
+ /* Calculate a pointer to this core's struct core_boot_config */
+ lw t0, GCR_CL_ID_OFS(t0)
+ li t1, COREBOOTCFG_SIZE
+ mul t0, t0, t1
+ la t1, mips_cps_core_bootcfg
+ lw t1, 0(t1)
+ addu t0, t0, t1
+
+ /* Calculate this VPE's ID. If the core doesn't support MT use 0 */
+ has_mt t6, 1f
+ li t9, 0
+
+ /* Find the number of VPEs present in the core */
+ mfc0 t1, CP0_MVPCONF0
+ srl t1, t1, MVPCONF0_PVPE_SHIFT
+ andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+ addi t1, t1, 1
+
+ /* Calculate a mask for the VPE ID from EBase.CPUNum */
+ clz t1, t1
+ li t2, 31
+ subu t1, t2, t1
+ li t2, 1
+ sll t1, t2, t1
+ addiu t1, t1, -1
+
+ /* Retrieve the VPE ID from EBase.CPUNum */
+ mfc0 t9, $15, 1
+ and t9, t9, t1
+
+1: /* Calculate a pointer to this VPE's struct vpe_boot_config */
+ li t1, VPEBOOTCFG_SIZE
+ mul v0, t9, t1
+ lw t7, COREBOOTCFG_VPECONFIG(t0)
+ addu v0, v0, t7
+
+#ifdef CONFIG_MIPS_MT
+
+ /* If the core doesn't support MT then return */
+ bnez t6, 1f
+ nop
+ jr ra
+ nop
+
+ .set push
+ .set mt
+
+1: /* Enter VPE configuration state */
+ dvpe
+ la t1, 1f
+ jr.hb t1
+ nop
+1: mfc0 t1, CP0_MVPCONTROL
+ ori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+
+ /* Loop through each VPE */
+ lw t6, COREBOOTCFG_VPEMASK(t0)
+ move t8, t6
+ li t5, 0
+
+ /* Check whether the VPE should be running. If not, skip it */
+1: andi t0, t6, 1
+ beqz t0, 2f
+ nop
+
+ /* Operate on the appropriate TC */
+ mfc0 t0, CP0_VPECONTROL
+ ori t0, t0, VPECONTROL_TARGTC
+ xori t0, t0, VPECONTROL_TARGTC
+ or t0, t0, t5
+ mtc0 t0, CP0_VPECONTROL
+ ehb
+
+ /* Skip the VPE if its TC is not halted */
+ mftc0 t0, CP0_TCHALT
+ beqz t0, 2f
+ nop
+
+ /* Calculate a pointer to the VPE's struct vpe_boot_config */
+ li t0, VPEBOOTCFG_SIZE
+ mul t0, t0, t5
+ addu t0, t0, t7
+
+ /* Set the TC restart PC */
+ lw t1, VPEBOOTCFG_PC(t0)
+ mttc0 t1, CP0_TCRESTART
+
+ /* Set the TC stack pointer */
+ lw t1, VPEBOOTCFG_SP(t0)
+ mttgpr t1, sp
+
+ /* Set the TC global pointer */
+ lw t1, VPEBOOTCFG_GP(t0)
+ mttgpr t1, gp
+
+ /* Copy config from this VPE */
+ mfc0 t0, CP0_CONFIG
+ mttc0 t0, CP0_CONFIG
+
+ /* Ensure no software interrupts are pending */
+ mttc0 zero, CP0_CAUSE
+ mttc0 zero, CP0_STATUS
+
+ /* Set TC active, not interrupt exempt */
+ mftc0 t0, CP0_TCSTATUS
+ li t1, ~TCSTATUS_IXMT
+ and t0, t0, t1
+ ori t0, t0, TCSTATUS_A
+ mttc0 t0, CP0_TCSTATUS
+
+ /* Clear the TC halt bit */
+ mttc0 zero, CP0_TCHALT
+
+ /* Set VPE active */
+ mftc0 t0, CP0_VPECONF0
+ ori t0, t0, VPECONF0_VPA
+ mttc0 t0, CP0_VPECONF0
+
+ /* Next VPE */
+2: srl t6, t6, 1
+ addi t5, t5, 1
+ bnez t6, 1b
+ nop
+
+ /* Leave VPE configuration state */
+ mfc0 t1, CP0_MVPCONTROL
+ xori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+ evpe
+
+ /* Check whether this VPE is meant to be running */
+ li t0, 1
+ sll t0, t0, t9
+ and t0, t0, t8
+ bnez t0, 2f
+ nop
+
+ /* This VPE should be offline, halt the TC */
+ li t0, TCHALT_H
+ mtc0 t0, CP0_TCHALT
+ la t0, 1f
+1: jr.hb t0
+ nop
+
+2: .set pop
+
+#endif /* CONFIG_MIPS_MT */
+
+ /* Return */
+ jr ra
+ nop
+ END(mips_cps_boot_vpes)
+
+#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
+
+ /* Calculate a pointer to this CPU's struct mips_static_suspend_state */
+ .macro psstate dest
+ .set push
+ .set noat
+ lw $1, TI_CPU(gp)
+ sll $1, $1, LONGLOG
+ la \dest, __per_cpu_offset
+ addu $1, $1, \dest
+ lw $1, 0($1)
+ la \dest, cps_cpu_state
+ addu \dest, \dest, $1
+ .set pop
+ .endm
+
+LEAF(mips_cps_pm_save)
+ /* Save CPU state */
+ SUSPEND_SAVE_REGS
+ psstate t1
+ SUSPEND_SAVE_STATIC
+ jr v0
+ nop
+ END(mips_cps_pm_save)
+
+LEAF(mips_cps_pm_restore)
+ /* Restore CPU state */
+ psstate t1
+ RESUME_RESTORE_STATIC
+ RESUME_RESTORE_REGS_RETURN
+ END(mips_cps_pm_restore)
+
+#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index c4ceccfa3828..09ce45980758 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -236,3 +236,14 @@ void arch_cpu_idle(void)
else
local_irq_enable();
}
+
+#ifdef CONFIG_CPU_IDLE
+
+int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ arch_cpu_idle();
+ return index;
+}
+
+#endif
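
mips_cpuidle_wait_enter() has the signature of a cpuidle ->enter callback, so a cpuidle driver can point a state table entry straight at it. A minimal sketch of such an entry follows, assuming the helper's prototype is visible; the driver name and latency figures are illustrative placeholders, not part of this patch:

    #include <linux/cpuidle.h>
    #include <linux/module.h>

    /* Prototype of the helper added to idle.c above (assumed visible via a header). */
    extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
                                       struct cpuidle_driver *drv, int index);

    /* Hypothetical driver wiring the wait helper into a single idle state. */
    static struct cpuidle_driver example_cps_idle_driver = {
            .name           = "example-cps-idle",   /* assumed name */
            .owner          = THIS_MODULE,
            .states         = {
                    {
                            .enter            = mips_cpuidle_wait_enter,
                            .exit_latency     = 1,  /* placeholder latencies */
                            .target_residency = 1,
                            .name             = "wait",
                            .desc             = "MIPS wait",
                    },
            },
            .state_count    = 1,
    };

    /* Registration would then be e.g.: cpuidle_register(&example_cps_idle_driver, NULL); */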
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 8520dad6d4e3..88e4c323382c 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -54,6 +54,21 @@ void gic_write_compare(cycle_t cnt)
(int)(cnt & 0xffffffff));
}
+void gic_write_cpu_compare(cycle_t cnt, int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+
+ local_irq_restore(flags);
+}
+
cycle_t gic_read_compare(void)
{
unsigned int hi, lo;
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index c9dc67402969..ba473608a347 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -9,12 +9,18 @@
*/
#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
void __iomem *mips_cpc_base;
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
phys_t __weak mips_cpc_phys_base(void)
{
u32 cpc_base;
@@ -39,6 +45,10 @@ phys_t __weak mips_cpc_phys_base(void)
int mips_cpc_probe(void)
{
phys_t addr;
+ unsigned cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cpc_core_lock, cpu));
addr = mips_cpc_phys_base();
if (!addr)
@@ -50,3 +60,21 @@ int mips_cpc_probe(void)
return 0;
}
+
+void mips_cpc_lock_other(unsigned int core)
+{
+ unsigned curr_core;
+ preempt_disable();
+ curr_core = current_cpu_data.core;
+ spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+}
+
+void mips_cpc_unlock_other(void)
+{
+ unsigned curr_core = current_cpu_data.core;
+ spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ preempt_enable();
+}
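
The lock/unlock pair added above is meant to bracket every access made through the CPC core-other register window; boot_core() and cps_cpu_die() in the smp-cps.c changes below use it exactly this way. A condensed sketch of the calling pattern (the function name here is hypothetical):

    #include <asm/mips-cpc.h>

    /* Reset another core via the CPC, serialised by the new helpers. */
    static void example_reset_other_core(unsigned int core)
    {
            /* Disables preemption, takes this core's lock and points the
             * core-other window at "core". */
            mips_cpc_lock_other(core);

            write_cpc_co_cmd(CPC_Cx_CMD_RESET);     /* acts on the selected core */

            /* Drops the lock and re-enables preemption. */
            mips_cpc_unlock_other();
    }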
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
new file mode 100644
index 000000000000..5aa4c6f8cf83
--- /dev/null
+++ b/arch/mips/kernel/pm-cps.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
+#include <asm/idle.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
+#include <asm/pm-cps.h>
+#include <asm/smp-cps.h>
+#include <asm/uasm.h>
+
+/*
+ * cps_nc_entry_fn - type of a generated non-coherent state entry function
+ * @online: the count of online coupled VPEs
+ * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
+ *
+ * The code entering & exiting non-coherent states is generated at runtime
+ * using uasm, in order to ensure that the compiler cannot insert a stray
+ * memory access at an unfortunate time and to allow the generation of optimal
+ * core-specific code particularly for cache routines. If coupled_coherence
+ * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
+ * returns the number of VPEs that were in the wait state at the point this
+ * VPE left it. Returns garbage if coupled_coherence is zero or this is not
+ * the entry function for CPS_PM_NC_WAIT.
+ */
+typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
+
+/*
+ * The entry point of the generated non-coherent idle state entry/exit
+ * functions. Actually per-core rather than per-CPU.
+ */
+static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
+ nc_asm_enter);
+
+/* Bitmap indicating which states are supported by the system */
+DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
+
+/*
+ * Indicates the number of coupled VPEs ready to operate in a non-coherent
+ * state. Actually per-core rather than per-CPU.
+ */
+static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
+static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
+
+/* Indicates online CPUs coupled with the current CPU */
+static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
+
+/*
+ * Used to synchronize entry to deep idle states. Actually per-core rather
+ * than per-CPU.
+ */
+static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
+
+/* Saved CPU state across the CPS_PM_POWER_GATED state */
+DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
+
+/* A somewhat arbitrary number of labels & relocs for uasm */
+static struct uasm_label labels[32] __initdata;
+static struct uasm_reloc relocs[32] __initdata;
+
+/* CPU dependent sync types */
+static unsigned stype_intervention;
+static unsigned stype_memory;
+static unsigned stype_ordering;
+
+enum mips_reg {
+ zero, at, v0, v1, a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9, k0, k1, gp, sp, fp, ra,
+};
+
+bool cps_pm_support_state(enum cps_pm_state state)
+{
+ return test_bit(state, state_support);
+}
+
+static void coupled_barrier(atomic_t *a, unsigned online)
+{
+ /*
+ * This function is effectively the same as
+ * cpuidle_coupled_parallel_barrier, which can't be used here since
+ * there's no cpuidle device.
+ */
+
+ if (!coupled_coherence)
+ return;
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(a);
+
+ while (atomic_read(a) < online)
+ cpu_relax();
+
+ if (atomic_inc_return(a) == online * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > online)
+ cpu_relax();
+}
+
+int cps_pm_enter_state(enum cps_pm_state state)
+{
+ unsigned cpu = smp_processor_id();
+ unsigned core = current_cpu_data.core;
+ unsigned online, left;
+ cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
+ u32 *core_ready_count, *nc_core_ready_count;
+ void *nc_addr;
+ cps_nc_entry_fn entry;
+ struct core_boot_config *core_cfg;
+ struct vpe_boot_config *vpe_cfg;
+
+ /* Check that there is an entry function for this state */
+ entry = per_cpu(nc_asm_enter, core)[state];
+ if (!entry)
+ return -EINVAL;
+
+ /* Calculate which coupled CPUs (VPEs) are online */
+#ifdef CONFIG_MIPS_MT
+ if (cpu_online(cpu)) {
+ cpumask_and(coupled_mask, cpu_online_mask,
+ &cpu_sibling_map[cpu]);
+ online = cpumask_weight(coupled_mask);
+ cpumask_clear_cpu(cpu, coupled_mask);
+ } else
+#endif
+ {
+ cpumask_clear(coupled_mask);
+ online = 1;
+ }
+
+ /* Setup the VPE to run mips_cps_pm_restore when started again */
+ if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ core_cfg = &mips_cps_core_bootcfg[core];
+ vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
+ vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
+ vpe_cfg->gp = (unsigned long)current_thread_info();
+ vpe_cfg->sp = 0;
+ }
+
+ /* Indicate that this CPU might not be coherent */
+ cpumask_clear_cpu(cpu, &cpu_coherent_mask);
+ smp_mb__after_clear_bit();
+
+ /* Create a non-coherent mapping of the core ready_count */
+ core_ready_count = per_cpu(ready_count, core);
+ nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
+ (unsigned long)core_ready_count);
+ nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
+ nc_core_ready_count = nc_addr;
+
+ /* Ensure ready_count is zero-initialised before the assembly runs */
+ ACCESS_ONCE(*nc_core_ready_count) = 0;
+ coupled_barrier(&per_cpu(pm_barrier, core), online);
+
+ /* Run the generated entry code */
+ left = entry(online, nc_core_ready_count);
+
+ /* Remove the non-coherent mapping of ready_count */
+ kunmap_noncoherent();
+
+ /* Indicate that this CPU is definitely coherent */
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+
+ /*
+ * If this VPE is the first to leave the non-coherent wait state then
+ * it needs to wake up any coupled VPEs still running their wait
+ * instruction so that they return to cpuidle, which can then complete
+ * coordination between the coupled VPEs & provide the governor with
+ * a chance to reflect on the length of time the VPEs were in the
+ * idle state.
+ */
+ if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
+ arch_send_call_function_ipi_mask(coupled_mask);
+
+ return 0;
+}
+
+static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cache_desc *cache,
+ unsigned op, int lbl)
+{
+ unsigned cache_size = cache->ways << cache->waybit;
+ unsigned i;
+ const unsigned unroll_lines = 32;
+
+ /* If the cache isn't present this function has it easy */
+ if (cache->flags & MIPS_CACHE_NOT_PRESENT)
+ return;
+
+ /* Load base address */
+ UASM_i_LA(pp, t0, (long)CKSEG0);
+
+ /* Calculate end address */
+ if (cache_size < 0x8000)
+ uasm_i_addiu(pp, t1, t0, cache_size);
+ else
+ UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
+
+ /* Start of cache op loop */
+ uasm_build_label(pl, *pp, lbl);
+
+ /* Generate the cache ops */
+ for (i = 0; i < unroll_lines; i++)
+ uasm_i_cache(pp, op, i * cache->linesz, t0);
+
+ /* Update the base address */
+ uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
+
+ /* Loop if we haven't reached the end address yet */
+ uasm_il_bne(pp, pr, t0, t1, lbl);
+ uasm_i_nop(pp);
+}
+
+static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cpuinfo_mips *cpu_info,
+ int lbl)
+{
+ unsigned i, fsb_size = 8;
+ unsigned num_loads = (fsb_size * 3) / 2;
+ unsigned line_stride = 2;
+ unsigned line_size = cpu_info->dcache.linesz;
+ unsigned perf_counter, perf_event;
+ unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
+
+ /*
+ * Determine whether this CPU requires an FSB flush, and if so which
+ * performance counter/event reflect stalls due to a full FSB.
+ */
+ switch (__get_cpu_type(cpu_info->cputype)) {
+ case CPU_INTERAPTIV:
+ perf_counter = 1;
+ perf_event = 51;
+ break;
+
+ case CPU_PROAPTIV:
+ /* Newer proAptiv cores don't require this workaround */
+ if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
+ return 0;
+
+ /* On older ones it's unavailable */
+ return -1;
+
+ /* CPUs which do not require the workaround */
+ case CPU_P5600:
+ return 0;
+
+ default:
+ WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
+ return -1;
+ }
+
+ /*
+ * Ensure that the fill/store buffer (FSB) is not holding the results
+ * of a prefetch, since if it is then the CPC sequencer may become
+ * stuck in the D3 (ClrBus) state whilst entering a low power state.
+ */
+
+ /* Preserve perf counter setup */
+ uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
+
+ /* Setup perf counter to count FSB full pipeline stalls */
+ uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
+ uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_ehb(pp);
+ uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
+ uasm_i_ehb(pp);
+
+ /* Base address for loads */
+ UASM_i_LA(pp, t0, (long)CKSEG0);
+
+ /* Start of clear loop */
+ uasm_build_label(pl, *pp, lbl);
+
+ /* Perform some loads to fill the FSB */
+ for (i = 0; i < num_loads; i++)
+ uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
+
+ /*
+ * Invalidate the new D-cache entries so that the cache will need
+ * refilling (via the FSB) if the loop is executed again.
+ */
+ for (i = 0; i < num_loads; i++) {
+ uasm_i_cache(pp, Hit_Invalidate_D,
+ i * line_size * line_stride, t0);
+ uasm_i_cache(pp, Hit_Writeback_Inv_SD,
+ i * line_size * line_stride, t0);
+ }
+
+ /* Completion barrier */
+ uasm_i_sync(pp, stype_memory);
+ uasm_i_ehb(pp);
+
+ /* Check whether the pipeline stalled due to the FSB being full */
+ uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
+
+ /* Loop if it didn't */
+ uasm_il_beqz(pp, pr, t1, lbl);
+ uasm_i_nop(pp);
+
+ /* Restore perf counter 1. The count may well now be wrong... */
+ uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_ehb(pp);
+ uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
+ uasm_i_ehb(pp);
+
+ return 0;
+}
+
+static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ unsigned r_addr, int lbl)
+{
+ uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
+ uasm_build_label(pl, *pp, lbl);
+ uasm_i_ll(pp, t1, 0, r_addr);
+ uasm_i_or(pp, t1, t1, t0);
+ uasm_i_sc(pp, t1, 0, r_addr);
+ uasm_il_beqz(pp, pr, t1, lbl);
+ uasm_i_nop(pp);
+}
+
+static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
+{
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ u32 *buf, *p;
+ const unsigned r_online = a0;
+ const unsigned r_nc_count = a1;
+ const unsigned r_pcohctl = t7;
+ const unsigned max_instrs = 256;
+ unsigned cpc_cmd;
+ int err;
+ enum {
+ lbl_incready = 1,
+ lbl_poll_cont,
+ lbl_secondary_hang,
+ lbl_disable_coherence,
+ lbl_flush_fsb,
+ lbl_invicache,
+ lbl_flushdcache,
+ lbl_hang,
+ lbl_set_cont,
+ lbl_secondary_cont,
+ lbl_decready,
+ };
+
+ /* Allocate a buffer to hold the generated code */
+ p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ /* Clear labels & relocs ready for (re)use */
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ /*
+ * Save CPU state. Note the non-standard calling convention
+ * with the return address placed in v0 to avoid clobbering
+ * the ra register before it is saved.
+ */
+ UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
+ uasm_i_jalr(&p, v0, t0);
+ uasm_i_nop(&p);
+ }
+
+ /*
+ * Load addresses of required CM & CPC registers. This is done early
+ * because they're needed in both the enable & disable coherence steps
+ * but in the coupled case the enable step will only run on one VPE.
+ */
+ UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
+
+ if (coupled_coherence) {
+ /* Increment ready_count */
+ uasm_i_sync(&p, stype_ordering);
+ uasm_build_label(&l, p, lbl_incready);
+ uasm_i_ll(&p, t1, 0, r_nc_count);
+ uasm_i_addiu(&p, t2, t1, 1);
+ uasm_i_sc(&p, t2, 0, r_nc_count);
+ uasm_il_beqz(&p, &r, t2, lbl_incready);
+ uasm_i_addiu(&p, t1, t1, 1);
+
+ /* Ordering barrier */
+ uasm_i_sync(&p, stype_ordering);
+
+ /*
+ * If this is the last VPE to become ready for non-coherence
+ * then it should branch below.
+ */
+ uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
+ uasm_i_nop(&p);
+
+ if (state < CPS_PM_POWER_GATED) {
+ /*
+ * Otherwise this is not the last VPE to become ready
+ * for non-coherence. It needs to wait until coherence
+ * has been disabled before proceeding, which it will do
+ * by polling for the top bit of ready_count being set.
+ */
+ uasm_i_addiu(&p, t1, zero, -1);
+ uasm_build_label(&l, p, lbl_poll_cont);
+ uasm_i_lw(&p, t0, 0, r_nc_count);
+ uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
+ uasm_i_ehb(&p);
+ uasm_i_yield(&p, zero, t1);
+ uasm_il_b(&p, &r, lbl_poll_cont);
+ uasm_i_nop(&p);
+ } else {
+ /*
+ * The core will lose power & this VPE will not continue
+ * so it can simply halt here.
+ */
+ uasm_i_addiu(&p, t0, zero, TCHALT_H);
+ uasm_i_mtc0(&p, t0, 2, 4);
+ uasm_build_label(&l, p, lbl_secondary_hang);
+ uasm_il_b(&p, &r, lbl_secondary_hang);
+ uasm_i_nop(&p);
+ }
+ }
+
+ /*
+ * This is the point of no return - this VPE will now proceed to
+ * disable coherence. At this point we *must* be sure that no other
+ * VPE within the core will interfere with the L1 dcache.
+ */
+ uasm_build_label(&l, p, lbl_disable_coherence);
+
+ /* Invalidate the L1 icache */
+ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
+ Index_Invalidate_I, lbl_invicache);
+
+ /* Writeback & invalidate the L1 dcache */
+ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
+ Index_Writeback_Inv_D, lbl_flushdcache);
+
+ /* Completion barrier */
+ uasm_i_sync(&p, stype_memory);
+ uasm_i_ehb(&p);
+
+ /*
+ * Disable all but self interventions. The load from COHCTL is defined
+ * by the interAptiv & proAptiv SUMs as ensuring that the operation
+ * resulting from the preceding store is complete.
+ */
+ uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Sync to ensure previous interventions are complete */
+ uasm_i_sync(&p, stype_intervention);
+ uasm_i_ehb(&p);
+
+ /* Disable coherence */
+ uasm_i_sw(&p, zero, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ if (state >= CPS_PM_CLOCK_GATED) {
+ err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
+ lbl_flush_fsb);
+ if (err)
+ goto out_err;
+
+ /* Determine the CPC command to issue */
+ switch (state) {
+ case CPS_PM_CLOCK_GATED:
+ cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
+ break;
+ case CPS_PM_POWER_GATED:
+ cpc_cmd = CPC_Cx_CMD_PWRDOWN;
+ break;
+ default:
+ BUG();
+ goto out_err;
+ }
+
+ /* Issue the CPC command */
+ UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
+ uasm_i_addiu(&p, t1, zero, cpc_cmd);
+ uasm_i_sw(&p, t1, 0, t0);
+
+ if (state == CPS_PM_POWER_GATED) {
+ /* If anything goes wrong just hang */
+ uasm_build_label(&l, p, lbl_hang);
+ uasm_il_b(&p, &r, lbl_hang);
+ uasm_i_nop(&p);
+
+ /*
+ * There's no point generating more code, the core is
+ * powered down & if powered back up will run from the
+ * reset vector not from here.
+ */
+ goto gen_done;
+ }
+
+ /* Completion barrier */
+ uasm_i_sync(&p, stype_memory);
+ uasm_i_ehb(&p);
+ }
+
+ if (state == CPS_PM_NC_WAIT) {
+ /*
+ * At this point it is safe for all VPEs to proceed with
+ * execution. This VPE will set the top bit of ready_count
+ * to indicate to the other VPEs that they may continue.
+ */
+ if (coupled_coherence)
+ cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
+ lbl_set_cont);
+
+ /*
+ * VPEs which did not disable coherence will continue
+ * executing, after coherence has been disabled, from this
+ * point.
+ */
+ uasm_build_label(&l, p, lbl_secondary_cont);
+
+ /* Now perform our wait */
+ uasm_i_wait(&p, 0);
+ }
+
+ /*
+ * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
+ * will run this. The first will actually re-enable coherence & the
+ * rest will just be performing a rather unusual nop.
+ */
+ uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Completion barrier */
+ uasm_i_sync(&p, stype_memory);
+ uasm_i_ehb(&p);
+
+ if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
+ /* Decrement ready_count */
+ uasm_build_label(&l, p, lbl_decready);
+ uasm_i_sync(&p, stype_ordering);
+ uasm_i_ll(&p, t1, 0, r_nc_count);
+ uasm_i_addiu(&p, t2, t1, -1);
+ uasm_i_sc(&p, t2, 0, r_nc_count);
+ uasm_il_beqz(&p, &r, t2, lbl_decready);
+ uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
+
+ /* Ordering barrier */
+ uasm_i_sync(&p, stype_ordering);
+ }
+
+ if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
+ /*
+ * At this point it is safe for all VPEs to proceed with
+ * execution. This VPE will set the top bit of ready_count
+ * to indicate to the other VPEs that they may continue.
+ */
+ cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
+
+ /*
+ * This core will be reliant upon another core sending a
+ * power-up command to the CPC in order to resume operation.
+ * Thus an arbitrary VPE can't trigger the core leaving the
+ * idle state and the one that disables coherence might as well
+ * be the one to re-enable it. The rest will continue from here
+ * after that has been done.
+ */
+ uasm_build_label(&l, p, lbl_secondary_cont);
+
+ /* Ordering barrier */
+ uasm_i_sync(&p, stype_ordering);
+ }
+
+ /* The core is coherent, time to return to C code */
+ uasm_i_jr(&p, ra);
+ uasm_i_nop(&p);
+
+gen_done:
+ /* Ensure the code didn't exceed the resources allocated for it */
+ BUG_ON((p - buf) > max_instrs);
+ BUG_ON((l - labels) > ARRAY_SIZE(labels));
+ BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
+
+ /* Patch branch offsets */
+ uasm_resolve_relocs(relocs, labels);
+
+ /* Flush the icache */
+ local_flush_icache_range((unsigned long)buf, (unsigned long)p);
+
+ return buf;
+out_err:
+ kfree(buf);
+ return NULL;
+}
+
+static int __init cps_gen_core_entries(unsigned cpu)
+{
+ enum cps_pm_state state;
+ unsigned core = cpu_data[cpu].core;
+ unsigned dlinesz = cpu_data[cpu].dcache.linesz;
+ void *entry_fn, *core_rc;
+
+ for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
+ if (per_cpu(nc_asm_enter, core)[state])
+ continue;
+ if (!test_bit(state, state_support))
+ continue;
+
+ entry_fn = cps_gen_entry_code(cpu, state);
+ if (!entry_fn) {
+ pr_err("Failed to generate core %u state %u entry\n",
+ core, state);
+ clear_bit(state, state_support);
+ }
+
+ per_cpu(nc_asm_enter, core)[state] = entry_fn;
+ }
+
+ if (!per_cpu(ready_count, core)) {
+ core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+ if (!core_rc) {
+ pr_err("Failed allocate core %u ready_count\n", core);
+ return -ENOMEM;
+ }
+ per_cpu(ready_count_alloc, core) = core_rc;
+
+ /* Ensure ready_count is aligned to a cacheline boundary */
+ core_rc += dlinesz - 1;
+ core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
+ per_cpu(ready_count, core) = core_rc;
+ }
+
+ return 0;
+}
+
+static int __init cps_pm_init(void)
+{
+ unsigned cpu;
+ int err;
+
+ /* Detect appropriate sync types for the system */
+ switch (current_cpu_data.cputype) {
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_M5150:
+ case CPU_P5600:
+ stype_intervention = 0x2;
+ stype_memory = 0x3;
+ stype_ordering = 0x10;
+ break;
+
+ default:
+ pr_warn("Power management is using heavyweight sync 0\n");
+ }
+
+ /* A CM is required for all non-coherent states */
+ if (!mips_cm_present()) {
+ pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
+ goto out;
+ }
+
+ /*
+ * If interrupts were enabled whilst running a wait instruction on a
+ * non-coherent core then the VPE may end up processing interrupts
+ * whilst non-coherent. That would be bad.
+ */
+ if (cpu_wait == r4k_wait_irqoff)
+ set_bit(CPS_PM_NC_WAIT, state_support);
+ else
+ pr_warn("pm-cps: non-coherent wait unavailable\n");
+
+ /* Detect whether a CPC is present */
+ if (mips_cpc_present()) {
+ /* Detect whether clock gating is implemented */
+ if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
+ set_bit(CPS_PM_CLOCK_GATED, state_support);
+ else
+ pr_warn("pm-cps: CPC does not support clock gating\n");
+
+ /* Power gating is available with CPS SMP & any CPC */
+ if (mips_cps_smp_in_use())
+ set_bit(CPS_PM_POWER_GATED, state_support);
+ else
+ pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
+ } else {
+ pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
+ }
+
+ for_each_present_cpu(cpu) {
+ err = cps_gen_core_entries(cpu);
+ if (err)
+ return err;
+ }
+out:
+ return 0;
+}
+arch_initcall(cps_pm_init);
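
Callers are expected to check cps_pm_support_state() before requesting a state, since cps_pm_init() only sets the bits for states the CM/CPC combination can actually provide. A condensed sketch of that pattern, with the fallback ordering chosen purely for illustration:

    #include <linux/errno.h>
    #include <asm/pm-cps.h>

    /* Enter the deepest idle state the platform advertises as supported. */
    static int example_enter_deepest_state(void)
    {
            if (cps_pm_support_state(CPS_PM_POWER_GATED))
                    return cps_pm_enter_state(CPS_PM_POWER_GATED);

            if (cps_pm_support_state(CPS_PM_CLOCK_GATED))
                    return cps_pm_enter_state(CPS_PM_CLOCK_GATED);

            if (cps_pm_support_state(CPS_PM_NC_WAIT))
                    return cps_pm_enter_state(CPS_PM_NC_WAIT);

            return -EINVAL; /* no non-coherent state available */
    }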
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c
new file mode 100644
index 000000000000..fefdf39d3df3
--- /dev/null
+++ b/arch/mips/kernel/pm.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * CPU PM notifiers for saving/restoring general CPU state.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/init.h>
+
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pm.h>
+#include <asm/watch.h>
+
+/* Used by PM helper macros in asm/pm.h */
+struct mips_static_suspend_state mips_static_suspend_state;
+
+/**
+ * mips_cpu_save() - Save general CPU state.
+ * Ensures that general CPU context is saved, notably FPU and DSP.
+ */
+static int mips_cpu_save(void)
+{
+ /* Save FPU state */
+ lose_fpu(1);
+
+ /* Save DSP state */
+ save_dsp(current);
+
+ return 0;
+}
+
+/**
+ * mips_cpu_restore() - Restore general CPU state.
+ * Restores important CPU context.
+ */
+static void mips_cpu_restore(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Restore ASID */
+ if (current->mm)
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+
+ /* Restore DSP state */
+ restore_dsp(current);
+
+ /* Restore UserLocal */
+ if (cpu_has_userlocal)
+ write_c0_userlocal(current_thread_info()->tp_value);
+
+ /* Restore watch registers */
+ __restore_watch();
+}
+
+/**
+ * mips_pm_notifier() - Notifier for preserving general CPU context.
+ * @self: Notifier block.
+ * @cmd: CPU PM event.
+ * @v: Private data (unused).
+ *
+ * This is called when a CPU power management event occurs, and is used to
+ * ensure that important CPU context is preserved across a CPU power down.
+ */
+static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ int ret;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ ret = mips_cpu_save();
+ if (ret)
+ return NOTIFY_STOP;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ mips_cpu_restore();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mips_pm_notifier_block = {
+ .notifier_call = mips_pm_notifier,
+};
+
+static int __init mips_pm_init(void)
+{
+ return cpu_pm_register_notifier(&mips_pm_notifier_block);
+}
+arch_initcall(mips_pm_init);
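
These notifiers (and the matching one added to traps.c at the end of this series) only fire if whoever enters a low-power state brackets the entry with the generic CPU PM calls. A minimal sketch of that bracketing around the CPS power-gated state; the function and its call site are assumptions for illustration, not part of this patch:

    #include <linux/cpu_pm.h>
    #include <asm/pm-cps.h>

    static int example_power_gate_this_cpu(void)
    {
            int err;

            /* CPU_PM_ENTER: mips_cpu_save() above saves FPU & DSP state. */
            err = cpu_pm_enter();
            if (err)
                    return err;

            err = cps_pm_enter_state(CPS_PM_POWER_GATED);

            /* CPU_PM_EXIT: mips_cpu_restore() plus the traps.c notifier
             * reprogram ASID, UserLocal, watch, Status, HWREna & EBase. */
            cpu_pm_exit();

            return err;
    }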
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index bb36b4e6b55f..df0598d9bfdd 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -20,104 +20,43 @@
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
+#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
static DECLARE_BITMAP(core_power, NR_CPUS);
-struct boot_config mips_cps_bootcfg;
+struct core_boot_config *mips_cps_core_bootcfg;
-static void init_core(void)
+static unsigned core_vpe_count(unsigned core)
{
- unsigned int nvpes, t;
- u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
+ unsigned cfg;
- if (!cpu_has_mipsmt)
- return;
-
- /* Enter VPE configuration state */
- dvpe();
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- /* Retrieve the count of VPEs in this core */
- mvpconf0 = read_c0_mvpconf0();
- nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
- smp_num_siblings = nvpes;
-
- for (t = 1; t < nvpes; t++) {
- /* Use a 1:1 mapping of TC index to VPE index */
- settc(t);
-
- /* Bind 1 TC to this VPE */
- tcbind = read_tc_c0_tcbind();
- tcbind &= ~TCBIND_CURVPE;
- tcbind |= t << TCBIND_CURVPE_SHIFT;
- write_tc_c0_tcbind(tcbind);
-
- /* Set exclusive TC, non-active, master */
- vpeconf0 = read_vpe_c0_vpeconf0();
- vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
- vpeconf0 |= t << VPECONF0_XTC_SHIFT;
- vpeconf0 |= VPECONF0_MVP;
- write_vpe_c0_vpeconf0(vpeconf0);
-
- /* Declare TC non-active, non-allocatable & interrupt exempt */
- tcstatus = read_tc_c0_tcstatus();
- tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
- tcstatus |= TCSTATUS_IXMT;
- write_tc_c0_tcstatus(tcstatus);
-
- /* Halt the TC */
- write_tc_c0_tchalt(TCHALT_H);
-
- /* Allow only 1 TC to execute */
- vpecontrol = read_vpe_c0_vpecontrol();
- vpecontrol &= ~VPECONTROL_TE;
- write_vpe_c0_vpecontrol(vpecontrol);
-
- /* Copy (most of) Status from VPE 0 */
- status = read_c0_status();
- status &= ~(ST0_IM | ST0_IE | ST0_KSU);
- status |= ST0_CU0;
- write_vpe_c0_status(status);
-
- /* Copy Config from VPE 0 */
- write_vpe_c0_config(read_c0_config());
- write_vpe_c0_config7(read_c0_config7());
-
- /* Ensure no software interrupts are pending */
- write_vpe_c0_cause(0);
-
- /* Sync Count */
- write_vpe_c0_count(read_c0_count());
- }
+ if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+ return 1;
- /* Leave VPE configuration state */
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+ cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+ return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}
static void __init cps_smp_setup(void)
{
unsigned int ncores, nvpes, core_vpes;
int c, v;
- u32 core_cfg, *entry_code;
/* Detect & record VPE topology */
ncores = mips_cm_numcores();
pr_info("VPE topology ");
for (c = nvpes = 0; c < ncores; c++) {
- if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
- write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
- core_cfg = read_gcr_co_config();
- core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
- CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
- } else {
- core_vpes = 1;
- }
-
+ core_vpes = core_vpe_count(c);
pr_cont("%c%u", c ? ',' : '{', core_vpes);
+ /* Use the number of VPEs in core 0 for smp_num_siblings */
+ if (!c)
+ smp_num_siblings = core_vpes;
+
for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
cpu_data[nvpes + v].core = c;
#ifdef CONFIG_MIPS_MT_SMP
@@ -137,19 +76,14 @@ static void __init cps_smp_setup(void)
__cpu_logical_map[v] = v;
}
+ /* Set a coherent default CCA (CWB) */
+ change_c0_config(CONF_CM_CMASK, 0x5);
+
/* Core 0 is powered up (we're running on it) */
bitmap_set(core_power, 0, 1);
- /* Disable MT - we only want to run 1 TC per VPE */
- if (cpu_has_mipsmt)
- dmt();
-
/* Initialise core 0 */
- init_core();
-
- /* Patch the start of mips_cps_core_entry to provide the CM base */
- entry_code = (u32 *)&mips_cps_core_entry;
- UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+ mips_cps_core_init();
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
@@ -157,15 +91,99 @@ static void __init cps_smp_setup(void)
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
+ unsigned ncores, core_vpes, c, cca;
+ bool cca_unsuitable;
+ u32 *entry_code;
+
mips_mt_set_cpuoptions();
+
+ /* Detect whether the CCA is unsuited to multi-core SMP */
+ cca = read_c0_config() & CONF_CM_CMASK;
+ switch (cca) {
+ case 0x4: /* CWBE */
+ case 0x5: /* CWB */
+ /* The CCA is coherent, multi-core is fine */
+ cca_unsuitable = false;
+ break;
+
+ default:
+ /* CCA is not coherent, multi-core is not usable */
+ cca_unsuitable = true;
+ }
+
+ /* Warn the user if the CCA prevents multi-core */
+ ncores = mips_cm_numcores();
+ if (cca_unsuitable && ncores > 1) {
+ pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
+ cca);
+
+ for_each_present_cpu(c) {
+ if (cpu_data[c].core)
+ set_cpu_present(c, false);
+ }
+ }
+
+ /*
+ * Patch the start of mips_cps_core_entry to provide:
+ *
+ * v0 = CM base address
+ * s0 = kseg0 CCA
+ */
+ entry_code = (u32 *)&mips_cps_core_entry;
+ UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+ uasm_i_addiu(&entry_code, 16, 0, cca);
+ dma_cache_wback_inv((unsigned long)&mips_cps_core_entry,
+ (void *)entry_code - (void *)&mips_cps_core_entry);
+
+ /* Allocate core boot configuration structs */
+ mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+ GFP_KERNEL);
+ if (!mips_cps_core_bootcfg) {
+ pr_err("Failed to allocate boot config for %u cores\n", ncores);
+ goto err_out;
+ }
+
+ /* Allocate VPE boot configuration structs */
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(c);
+ mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+ sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+ GFP_KERNEL);
+ if (!mips_cps_core_bootcfg[c].vpe_config) {
+ pr_err("Failed to allocate %u VPE boot configs\n",
+ core_vpes);
+ goto err_out;
+ }
+ }
+
+ /* Mark this CPU as booted */
+ atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
+ 1 << cpu_vpe_id(&current_cpu_data));
+
+ return;
+err_out:
+ /* Clean up allocations */
+ if (mips_cps_core_bootcfg) {
+ for (c = 0; c < ncores; c++)
+ kfree(mips_cps_core_bootcfg[c].vpe_config);
+ kfree(mips_cps_core_bootcfg);
+ mips_cps_core_bootcfg = NULL;
+ }
+
+ /* Effectively disable SMP by declaring CPUs not present */
+ for_each_possible_cpu(c) {
+ if (c == 0)
+ continue;
+ set_cpu_present(c, false);
+ }
}
-static void boot_core(struct boot_config *cfg)
+static void boot_core(unsigned core)
{
u32 access;
/* Select the appropriate core */
- write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+ write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
/* Set its reset vector */
write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
@@ -175,104 +193,74 @@ static void boot_core(struct boot_config *cfg)
/* Ensure the core can access the GCRs */
access = read_gcr_access();
- access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
+ access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
write_gcr_access(access);
- /* Copy cfg */
- mips_cps_bootcfg = *cfg;
-
if (mips_cpc_present()) {
- /* Select the appropriate core */
- write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
-
/* Reset the core */
+ mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+ mips_cpc_unlock_other();
} else {
/* Take the core out of reset */
write_gcr_co_reset_release(0);
}
/* The core is now powered up */
- bitmap_set(core_power, cfg->core, 1);
+ bitmap_set(core_power, core, 1);
}
-static void boot_vpe(void *info)
+static void remote_vpe_boot(void *dummy)
{
- struct boot_config *cfg = info;
- u32 tcstatus, vpeconf0;
-
- /* Enter VPE configuration state */
- dvpe();
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- settc(cfg->vpe);
-
- /* Set the TC restart PC */
- write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
-
- /* Activate the TC, allow interrupts */
- tcstatus = read_tc_c0_tcstatus();
- tcstatus &= ~TCSTATUS_IXMT;
- tcstatus |= TCSTATUS_A;
- write_tc_c0_tcstatus(tcstatus);
-
- /* Clear the TC halt bit */
- write_tc_c0_tchalt(0);
-
- /* Activate the VPE */
- vpeconf0 = read_vpe_c0_vpeconf0();
- vpeconf0 |= VPECONF0_VPA;
- write_vpe_c0_vpeconf0(vpeconf0);
-
- /* Set the stack & global pointer registers */
- write_tc_gpr_sp(cfg->sp);
- write_tc_gpr_gp(cfg->gp);
-
- /* Leave VPE configuration state */
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
- /* Enable other VPEs to execute */
- evpe(EVPE_ENABLE);
+ mips_cps_boot_vpes();
}
static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
- struct boot_config cfg;
+ unsigned core = cpu_data[cpu].core;
+ unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+ struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
unsigned int remote;
int err;
- cfg.core = cpu_data[cpu].core;
- cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
- cfg.pc = (unsigned long)&smp_bootstrap;
- cfg.sp = __KSTK_TOS(idle);
- cfg.gp = (unsigned long)task_thread_info(idle);
+ vpe_cfg->pc = (unsigned long)&smp_bootstrap;
+ vpe_cfg->sp = __KSTK_TOS(idle);
+ vpe_cfg->gp = (unsigned long)task_thread_info(idle);
+
+ atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
+
+ preempt_disable();
- if (!test_bit(cfg.core, core_power)) {
+ if (!test_bit(core, core_power)) {
/* Boot a VPE on a powered down core */
- boot_core(&cfg);
- return;
+ boot_core(core);
+ goto out;
}
- if (cfg.core != current_cpu_data.core) {
+ if (core != current_cpu_data.core) {
/* Boot a VPE on another powered up core */
for (remote = 0; remote < NR_CPUS; remote++) {
- if (cpu_data[remote].core != cfg.core)
+ if (cpu_data[remote].core != core)
continue;
if (cpu_online(remote))
break;
}
BUG_ON(remote >= NR_CPUS);
- err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
+ err = smp_call_function_single(remote, remote_vpe_boot,
+ NULL, 1);
if (err)
panic("Failed to call remote CPU\n");
- return;
+ goto out;
}
BUG_ON(!cpu_has_mipsmt);
/* Boot a VPE on this core */
- boot_vpe(&cfg);
+ mips_cps_boot_vpes();
+out:
+ preempt_enable();
}
static void cps_init_secondary(void)
@@ -281,10 +269,6 @@ static void cps_init_secondary(void)
if (cpu_has_mipsmt)
dmt();
- /* TODO: revisit this assumption once hotplug is implemented */
- if (cpu_vpe_id(&current_cpu_data) == 0)
- init_core();
-
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
STATUSF_IP6 | STATUSF_IP7);
}
@@ -302,6 +286,148 @@ static void cps_smp_finish(void)
local_irq_enable();
}
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int cps_cpu_disable(void)
+{
+ unsigned cpu = smp_processor_id();
+ struct core_boot_config *core_cfg;
+
+ if (!cpu)
+ return -EBUSY;
+
+ if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+ return -EINVAL;
+
+ core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+ atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
+ smp_mb__after_atomic_dec();
+ set_cpu_online(cpu, false);
+ cpu_clear(cpu, cpu_callin_map);
+
+ return 0;
+}
+
+static DECLARE_COMPLETION(cpu_death_chosen);
+static unsigned cpu_death_sibling;
+static enum {
+ CPU_DEATH_HALT,
+ CPU_DEATH_POWER,
+} cpu_death;
+
+void play_dead(void)
+{
+ unsigned cpu, core;
+
+ local_irq_disable();
+ idle_task_exit();
+ cpu = smp_processor_id();
+ cpu_death = CPU_DEATH_POWER;
+
+ if (cpu_has_mipsmt) {
+ core = cpu_data[cpu].core;
+
+ /* Look for another online VPE within the core */
+ for_each_online_cpu(cpu_death_sibling) {
+ if (cpu_data[cpu_death_sibling].core != core)
+ continue;
+
+ /*
+ * There is an online VPE within the core. Just halt
+ * this TC and leave the core alone.
+ */
+ cpu_death = CPU_DEATH_HALT;
+ break;
+ }
+ }
+
+ /* This CPU has chosen its way out */
+ complete(&cpu_death_chosen);
+
+ if (cpu_death == CPU_DEATH_HALT) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else {
+ /* Power down the core */
+ cps_pm_enter_state(CPS_PM_POWER_GATED);
+ }
+
+ /* This should never be reached */
+ panic("Failed to offline CPU %u", cpu);
+}
+
+static void wait_for_sibling_halt(void *ptr_cpu)
+{
+ unsigned cpu = (unsigned)ptr_cpu;
+ unsigned vpe_id = cpu_data[cpu].vpe_id;
+ unsigned halted;
+ unsigned long flags;
+
+ do {
+ local_irq_save(flags);
+ settc(vpe_id);
+ halted = read_tc_c0_tchalt();
+ local_irq_restore(flags);
+ } while (!(halted & TCHALT_H));
+}
+
+static void cps_cpu_die(unsigned int cpu)
+{
+ unsigned core = cpu_data[cpu].core;
+ unsigned stat;
+ int err;
+
+ /* Wait for the cpu to choose its way out */
+ if (!wait_for_completion_timeout(&cpu_death_chosen,
+ msecs_to_jiffies(5000))) {
+ pr_err("CPU%u: didn't offline\n", cpu);
+ return;
+ }
+
+ /*
+ * Now wait for the CPU to actually offline. Without doing this, the
+ * offlining may race with one or more of:
+ *
+ * - Onlining the CPU again.
+ * - Powering down the core if another VPE within it is offlined.
+ * - A sibling VPE entering a non-coherent state.
+ *
+ * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
+ * with which we could race, so do nothing.
+ */
+ if (cpu_death == CPU_DEATH_POWER) {
+ /*
+ * Wait for the core to enter a powered down or clock gated
+ * state, the latter happening when a JTAG probe is connected
+ * in which case the CPC will refuse to power down the core.
+ */
+ do {
+ mips_cpc_lock_other(core);
+ stat = read_cpc_co_stat_conf();
+ stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+ mips_cpc_unlock_other();
+ } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
+ stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
+ stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
+
+ /* Indicate the core is powered off */
+ bitmap_clear(core_power, core, 1);
+ } else if (cpu_has_mipsmt) {
+ /*
+ * Have a CPU with access to the offlined CPUs registers wait
+ * for its TC to halt.
+ */
+ err = smp_call_function_single(cpu_death_sibling,
+ wait_for_sibling_halt,
+ (void *)cpu, 1);
+ if (err)
+ panic("Failed to call remote sibling CPU\n");
+ }
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
static struct plat_smp_ops cps_smp_ops = {
.smp_setup = cps_smp_setup,
.prepare_cpus = cps_prepare_cpus,
@@ -310,8 +436,18 @@ static struct plat_smp_ops cps_smp_ops = {
.smp_finish = cps_smp_finish,
.send_ipi_single = gic_send_ipi_single,
.send_ipi_mask = gic_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cps_cpu_disable,
+ .cpu_die = cps_cpu_die,
+#endif
};
+bool mips_cps_smp_in_use(void)
+{
+ extern struct plat_smp_ops *mp_ops;
+ return mp_ops == &cps_smp_ops;
+}
+
int register_cps_smp_ops(void)
{
if (!mips_cm_present()) {
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
index 3bb1f92ab525..3b21a96d1ccb 100644
--- a/arch/mips/kernel/smp-gic.c
+++ b/arch/mips/kernel/smp-gic.c
@@ -15,12 +15,14 @@
#include <linux/printk.h>
#include <asm/gic.h>
+#include <asm/mips-cpc.h>
#include <asm/smp-ops.h>
void gic_send_ipi_single(int cpu, unsigned int action)
{
unsigned long flags;
unsigned int intr;
+ unsigned int core = cpu_data[cpu].core;
pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
smp_processor_id(), __func__, cpu, action, read_c0_status());
@@ -41,6 +43,15 @@ void gic_send_ipi_single(int cpu, unsigned int action)
}
gic_send_ipi(intr);
+
+ if (mips_cpc_present() && (core != current_cpu_data.core)) {
+ while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+ mips_cpc_lock_other(core);
+ write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
+ mips_cpc_unlock_other();
+ }
+ }
+
local_irq_restore(flags);
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index ce7677523b68..9bad52ede903 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -62,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
+cpumask_t cpu_coherent_mask;
+
static inline void set_cpu_sibling_map(int cpu)
{
int i;
@@ -114,6 +116,7 @@ asmlinkage void start_secondary(void)
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
+ cpu_set(cpu, cpu_coherent_mask);
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
@@ -175,6 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
+ cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
@@ -390,3 +394,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *))
}
EXPORT_SYMBOL(dump_send_ipi);
#endif
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
+static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+
+void tick_broadcast(const struct cpumask *mask)
+{
+ atomic_t *count;
+ struct call_single_data *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ count = &per_cpu(tick_broadcast_count, cpu);
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+
+ if (atomic_inc_return(count) == 1)
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+static void tick_broadcast_callee(void *info)
+{
+ int cpu = smp_processor_id();
+ tick_receive_broadcast();
+ atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
+}
+
+static int __init tick_broadcast_init(void)
+{
+ struct call_single_data *csd;
+ int cpu;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+ csd->func = tick_broadcast_callee;
+ }
+
+ return 0;
+}
+early_initcall(tick_broadcast_init);
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3a2672907f80..1fd1a0c4f104 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -15,6 +15,7 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
+#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -1837,18 +1838,16 @@ static int __init ulri_disable(char *s)
}
__setup("noulri", ulri_disable);
-void per_cpu_trap_init(bool is_boot_cpu)
+/* configure STATUS register */
+static void configure_status(void)
{
- unsigned int cpu = smp_processor_id();
- unsigned int status_set = ST0_CU0;
- unsigned int hwrena = cpu_hwrena_impl_bits;
-
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
* and the 16/32 or 32/32 FPR register model. Reset the BEV
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
+ unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
@@ -1859,6 +1858,12 @@ void per_cpu_trap_init(bool is_boot_cpu)
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
+}
+
+/* configure HWRENA register */
+static void configure_hwrena(void)
+{
+ unsigned int hwrena = cpu_hwrena_impl_bits;
if (cpu_has_mips_r2)
hwrena |= 0x0000000f;
@@ -1868,7 +1873,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
if (hwrena)
write_c0_hwrena(hwrena);
+}
+static void configure_exception_vector(void)
+{
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase);
@@ -1884,6 +1892,16 @@ void per_cpu_trap_init(bool is_boot_cpu)
} else
set_c0_cause(CAUSEF_IV);
}
+}
+
+void per_cpu_trap_init(bool is_boot_cpu)
+{
+ unsigned int cpu = smp_processor_id();
+
+ configure_status();
+ configure_hwrena();
+
+ configure_exception_vector();
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
@@ -2122,3 +2140,32 @@ void __init trap_init(void)
cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
+
+static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ configure_status();
+ configure_hwrena();
+ configure_exception_vector();
+
+ /* Restore register with CPU number for TLB handlers */
+ TLBMISS_HANDLER_RESTORE();
+
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block trap_pm_notifier_block = {
+ .notifier_call = trap_pm_notifier,
+};
+
+static int __init trap_pm_init(void)
+{
+ return cpu_pm_register_notifier(&trap_pm_notifier_block);
+}
+arch_initcall(trap_pm_init);