path: root/arch/mips/netlogic/common
author     Jayachandran C <jayachandranc@netlogicmicro.com> 2011-11-16 00:21:29 +0000
committer  Ralf Baechle <ralf@linux-mips.org> 2011-12-07 22:04:56 +0000
commit     66d29985fab8207b1b2c03ac34a2c294c5b47a30 (patch)
tree       f73145e09c53606716266577eef5e14262129ed9 /arch/mips/netlogic/common
parent     MIPS: Netlogic: Add default XLP config. (diff)
MIPS: Netlogic: Merge some of XLR/XLP wakeup code
Create a common NMI and reset handler in smpboot.S and use it for both
XLR and XLP. In the earlier code, the woken-up CPUs would busy-wait
until released; switch this to wakeup by NMI.

The initial wakeup code for XLR and XLP differs since they are started
from different bootloaders (XLP from u-boot and XLR from the netlogic
bootloader), but on both platforms the woken-up CPUs wait and are
released by sending an NMI.

Add support for starting XLR and XLP with 1, 2 or 4 threads per core.

Signed-off-by: Jayachandran C <jayachandranc@netlogicmicro.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2970/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
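As an illustration of the new interface, platform setup code could request 1, 2 or 4 threads per core through the nlm_wakeup_secondary_cpus() helper added in smp.c below. The caller name and the mask value in this sketch are hypothetical; only the helper itself and the per-core nibble encoding checked by nlm_parse_cpumask() come from this patch.

/*
 * Hedged sketch: requesting 4 threads per core on all 8 cores.
 * Each 4-bit nibble of the wakeup mask describes one core's threads
 * and, per nlm_parse_cpumask() below, must be 0x1 (1 thread), 0x3
 * (2 threads) or 0xf (4 threads); all non-zero nibbles must match,
 * otherwise the code panics.
 */
#include <linux/types.h>

extern int nlm_wakeup_secondary_cpus(u32 wakeup_mask);	/* added by this patch */

static void example_wakeup_all_threads(void)		/* hypothetical caller */
{
	u32 mask = 0xffffffff;	/* 8 cores x 4 threads each */

	nlm_wakeup_secondary_cpus(mask);
}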
Diffstat (limited to 'arch/mips/netlogic/common')
-rw-r--r--  arch/mips/netlogic/common/Makefile    2
-rw-r--r--  arch/mips/netlogic/common/smp.c      75
-rw-r--r--  arch/mips/netlogic/common/smpboot.S 272
3 files changed, 346 insertions, 3 deletions
diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile
index d4215783c3e5..291372a086f5 100644
--- a/arch/mips/netlogic/common/Makefile
+++ b/arch/mips/netlogic/common/Makefile
@@ -1,3 +1,3 @@
obj-y += irq.o time.o
-obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_EARLY_PRINTK) += earlycons.o
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index c1960439f7e1..476c93ef3037 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -47,10 +47,12 @@
#if defined(CONFIG_CPU_XLP)
#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/pic.h>
#elif defined(CONFIG_CPU_XLR)
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
#else
#error "Unknown CPU"
#endif
@@ -125,10 +127,10 @@ void nlm_cpus_done(void)
* Boot all other cpus in the system, initialize them, and bring them into
* the boot function
*/
-int nlm_cpu_unblock[NR_CPUS];
int nlm_cpu_ready[NR_CPUS];
unsigned long nlm_next_gp;
unsigned long nlm_next_sp;
+
cpumask_t phys_cpu_present_map;
void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
@@ -142,7 +144,7 @@ void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
/* barrier */
__sync();
- nlm_cpu_unblock[cpu] = 1;
+ nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1);
}
void __init nlm_smp_setup(void)
@@ -178,12 +180,81 @@ void __init nlm_smp_setup(void)
(unsigned long)cpu_possible_map.bits[0]);
pr_info("Detected %i Slave CPU(s)\n", num_cpus);
+ nlm_set_nmi_handler(nlm_boot_secondary_cpus);
}
void nlm_prepare_cpus(unsigned int max_cpus)
{
}
+static int nlm_parse_cpumask(u32 cpu_mask)
+{
+ uint32_t core0_thr_mask, core_thr_mask;
+ int threadmode, i;
+
+ core0_thr_mask = cpu_mask & 0xf;
+ switch (core0_thr_mask) {
+ case 1:
+ nlm_threads_per_core = 1;
+ threadmode = 0;
+ break;
+ case 3:
+ nlm_threads_per_core = 2;
+ threadmode = 2;
+ break;
+ case 0xf:
+ nlm_threads_per_core = 4;
+ threadmode = 3;
+ break;
+ default:
+ goto unsupp;
+ }
+
+ /* Verify other cores CPU masks */
+ nlm_coremask = 1;
+ nlm_cpumask = core0_thr_mask;
+ for (i = 1; i < 8; i++) {
+ core_thr_mask = (cpu_mask >> (i * 4)) & 0xf;
+ if (core_thr_mask) {
+ if (core_thr_mask != core0_thr_mask)
+ goto unsupp;
+ nlm_coremask |= 1 << i;
+ nlm_cpumask |= core0_thr_mask << (4 * i);
+ }
+ }
+ return threadmode;
+
+unsupp:
+ panic("Unsupported CPU mask %x\n", cpu_mask);
+ return 0;
+}
+
+int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
+{
+ unsigned long reset_vec;
+ char *reset_data;
+ int threadmode;
+
+ /* Update reset entry point with CPU init code */
+ reset_vec = CKSEG1ADDR(RESET_VEC_PHYS);
+ memcpy((void *)reset_vec, (void *)nlm_reset_entry,
+ (nlm_reset_entry_end - nlm_reset_entry));
+
+ /* verify the mask and setup core config variables */
+ threadmode = nlm_parse_cpumask(wakeup_mask);
+
+ /* Setup CPU init parameters */
+ reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS);
+ *(int *)(reset_data + BOOT_THREAD_MODE) = threadmode;
+
+#ifdef CONFIG_CPU_XLP
+ xlp_wakeup_secondary_cpus();
+#else
+ xlr_wakeup_secondary_cpus();
+#endif
+ return 0;
+}
+
struct plat_smp_ops nlm_smp_ops = {
.send_ipi_single = nlm_send_ipi_single,
.send_ipi_mask = nlm_send_ipi_mask,
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
new file mode 100644
index 000000000000..c138b1a6dec3
--- /dev/null
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asmmacro.h>
+#include <asm/addrspace.h>
+
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/cpucontrol.h>
+
+#define CP0_EBASE $15
+#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
+ XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
+ SYS_CPU_NONCOHERENT_MODE * 4
+
+.macro __config_lsu
+ li t0, LSU_DEFEATURE
+ mfcr t1, t0
+
+ lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
+ or t1, t1, t2
+ li t2, ~0xe /* S1RCM */
+ and t1, t1, t2
+ mtcr t1, t0
+
+ li t0, SCHED_DEFEATURE
+ lui t1, 0x0100 /* Experimental: Disable BRU accepting ALU ops */
+ mtcr t1, t0
+.endm
+
+/*
+ * The cores start here when they are woken up. This is also the NMI
+ * entry point, so check for that first.
+ *
+ * The data corresponding to reset is stored at the RESET_DATA_PHYS
+ * location; it holds the thread mask (used when a core is woken up)
+ * and the current NMI handler in case we reached here for an NMI.
+ *
+ * When a core or thread is newly woken up, it loops in a 'wait'. When
+ * the CPU really needs waking up, we send it an NMI with the NMI
+ * handler set to nlm_boot_secondary_cpus.
+ */
+
+ .set noreorder
+ .set noat
+ .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
+
+FEXPORT(nlm_reset_entry)
+ dmtc0 k0, $22, 6
+ dmtc0 k1, $22, 7
+ mfc0 k0, CP0_STATUS
+ li k1, 0x80000
+ and k1, k0, k1
+ beqz k1, 1f /* go to real reset entry */
+ nop
+ li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+ ld k0, BOOT_NMI_HANDLER(k1)
+ jr k0
+ nop
+
+1: /* Entry point on core wakeup */
+ mfc0 t0, CP0_EBASE, 1
+ mfc0 t1, CP0_EBASE, 1
+ srl t1, 5
+ andi t1, 0x3 /* t1 <- node */
+ li t2, 0x40000
+ mul t3, t2, t1 /* t3 = node * 0x40000 */
+ srl t0, t0, 2
+ and t0, t0, 0x7 /* t0 <- core */
+ li t1, 0x1
+ sll t0, t1, t0
+ nor t0, t0, zero /* t0 <- ~(1 << core) */
+ li t2, SYS_CPU_COHERENT_BASE(0)
+ add t2, t2, t3 /* t2 <- SYS offset for node */
+ lw t1, 0(t2)
+ and t1, t1, t0
+ sw t1, 0(t2)
+
+ /* read back to ensure complete */
+ lw t1, 0(t2)
+ sync
+
+ /* Configure LSU on Non-0 Cores. */
+ __config_lsu
+
+/*
+ * Wake up sibling threads from the initial thread in
+ * a core.
+ */
+EXPORT(nlm_boot_siblings)
+ li t0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
+ li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
+ mfcr t2, t0
+ or t2, t2, t1
+ mtcr t2, t0
+
+ /*
+ * The new hardware thread starts at the next instruction.
+ * For all cases other than core 0 thread 0, we will
+ * jump to the secondary wait function.
+ */
+ mfc0 v0, CP0_EBASE, 1
+ andi v0, 0x7f /* v0 <- node/core */
+
+#if 1
+ /* A0 errata - Write MMU_SETUP after changing thread mode register. */
+ andi v1, v0, 0x3 /* v1 <- thread id */
+ bnez v1, 2f
+ nop
+
+ li t0, MMU_SETUP
+ li t1, 0
+ mtcr t1, t0
+ ehb
+#endif
+
+2: beqz v0, 4f
+ nop
+
+ /* setup status reg */
+ mfc0 t1, CP0_STATUS
+ li t0, ST0_BEV
+ or t1, t0
+ xor t1, t0
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
+ /* mark CPU ready */
+ PTR_LA t1, nlm_cpu_ready
+ sll v1, v0, 2
+ PTR_ADDU t1, v1
+ li t2, 1
+ sw t2, 0(t1)
+ /* Wait until NMI hits */
+3: wait
+ j 3b
+ nop
+
+ /*
+ * For the boot CPU, we have to restore registers and
+ * return
+ */
+4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */
+ li t1, 0xfadebeef
+ dmtc0 t1, $4, 2 /* overwrite UserLocal with marker value */
+ PTR_SUBU sp, t0, PT_SIZE
+ RESTORE_ALL
+ jr ra
+ nop
+EXPORT(nlm_reset_entry_end)
+
+FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
+ __config_lsu
+ dmtc0 sp, $4, 2 /* SP saved in UserLocal */
+ SAVE_ALL
+ sync
+ /* find the location to which nlm_boot_siblings was relocated */
+ li t0, CKSEG1ADDR(RESET_VEC_PHYS)
+ dla t1, nlm_reset_entry
+ dla t2, nlm_boot_siblings
+ dsubu t2, t1
+ daddu t2, t0
+ /* call it */
+ jr t2
+ nop
+ /* not reached */
+
+ __CPUINIT
+NESTED(nlm_boot_secondary_cpus, 16, sp)
+ PTR_LA t1, nlm_next_sp
+ PTR_L sp, 0(t1)
+ PTR_LA t1, nlm_next_gp
+ PTR_L gp, 0(t1)
+
+ /* a0 has the processor id */
+ PTR_LA t0, nlm_early_init_secondary
+ jalr t0
+ nop
+
+ PTR_LA t0, smp_bootstrap
+ jr t0
+ nop
+END(nlm_boot_secondary_cpus)
+ __FINIT
+
+/*
+ * In case of the RMIboot bootloader, which is used on XLR boards, the
+ * CPUs are already woken up and waiting in bootloader code.
+ * This will get them out of the bootloader code and into Linux. Needed
+ * because the bootloader area will be taken over and initialized by Linux.
+ */
+ __CPUINIT
+NESTED(nlm_rmiboot_preboot, 16, sp)
+ mfc0 t0, $15, 1 # read ebase
+ andi t0, 0x1f # t0 has the processor_id()
+ andi t2, t0, 0x3 # thread no
+ sll t0, 2 # offset in cpu array
+
+ PTR_LA t1, nlm_cpu_ready # mark CPU ready
+ PTR_ADDU t1, t0
+ li t3, 1
+ sw t3, 0(t1)
+
+ bnez t2, 1f # skip thread programming
+ nop # for non zero hw threads
+
+ /*
+ * MMU setup only for first thread in core
+ */
+ li t0, 0x400
+ mfcr t1, t0
+ li t2, 6 # XLR thread mode mask
+ nor t3, t2, zero
+ and t2, t1, t2 # t2 - current thread mode
+ li v0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw v1, BOOT_THREAD_MODE(v0) # v1 - new thread mode
+ sll v1, 1
+ beq v1, t2, 1f # same as requested value
+ nop # nothing to do
+
+ and t2, t1, t3 # mask out old thread mode
+ or t1, t2, v1 # put in new value
+ mtcr t1, t0 # update core control
+
+1: wait
+ j 1b
+ nop
+END(nlm_rmiboot_preboot)
+ __FINIT