Diffstat (limited to 'arch/powerpc/kernel/setup-common.c')
-rw-r--r--  arch/powerpc/kernel/setup-common.c  189
1 file changed, 188 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 8ca79b7503d8..714b4ba7ab86 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -35,6 +35,7 @@
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_platform.h>
+#include <linux/hugetlb.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
@@ -61,6 +62,12 @@
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/fadump.h>
+#include <asm/udbg.h>
+#include <asm/hugetlb.h>
+#include <asm/livepatch.h>
+#include <asm/mmu_context.h>
+
+#include "setup.h"
#ifdef DEBUG
#include <asm/udbg.h>
@@ -494,7 +501,7 @@ void __init smp_setup_cpu_maps(void)
* On pSeries LPAR, we need to know how many cpus
* could possibly be added to this partition.
*/
- if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
const __be32 *ireg;
@@ -575,6 +582,7 @@ void probe_machine(void)
{
extern struct machdep_calls __machine_desc_start;
extern struct machdep_calls __machine_desc_end;
+ unsigned int i;
/*
* Iterate all ppc_md structures until we find the proper
@@ -582,6 +590,17 @@ void probe_machine(void)
*/
DBG("Probing machine type ...\n");
+ /*
+ * Check that ppc_md is empty; if not, we have a bug, i.e., an entry
+ * was set up before probe_machine() and will be overwritten.
+ */
+ for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
+ if (((void **)&ppc_md)[i]) {
+ printk(KERN_ERR "Entry %d in ppc_md non-empty before"
+ " machine probe!\n", i);
+ }
+ }
+
for (machine_id = &__machine_desc_start;
machine_id < &__machine_desc_end;
machine_id++) {
@@ -676,6 +695,8 @@ static struct notifier_block ppc_panic_block = {
void __init setup_panic(void)
{
+ if (!ppc_md.panic)
+ return;
atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}
@@ -744,3 +765,169 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
set_dma_ops(&pdev->dev, &dma_direct_ops);
}
+
+static __init void print_system_info(void)
+{
+ pr_info("-----------------------------------------------------\n");
+#ifdef CONFIG_PPC_STD_MMU_64
+ pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_32
+ pr_info("Hash_size = 0x%lx\n", Hash_size);
+#endif
+ pr_info("phys_mem_size = 0x%llx\n",
+ (unsigned long long)memblock_phys_mem_size());
+
+ pr_info("dcache_bsize = 0x%x\n", dcache_bsize);
+ pr_info("icache_bsize = 0x%x\n", icache_bsize);
+ if (ucache_bsize != 0)
+ pr_info("ucache_bsize = 0x%x\n", ucache_bsize);
+
+ pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
+ pr_info(" possible = 0x%016lx\n",
+ (unsigned long)CPU_FTRS_POSSIBLE);
+ pr_info(" always = 0x%016lx\n",
+ (unsigned long)CPU_FTRS_ALWAYS);
+ pr_info("cpu_user_features = 0x%08x 0x%08x\n",
+ cur_cpu_spec->cpu_user_features,
+ cur_cpu_spec->cpu_user_features2);
+ pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
+#ifdef CONFIG_PPC64
+ pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
+#endif
+
+#ifdef CONFIG_PPC_STD_MMU_64
+ if (htab_address)
+ pr_info("htab_address = 0x%p\n", htab_address);
+ if (htab_hash_mask)
+ pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_32
+ if (Hash)
+ pr_info("Hash = 0x%p\n", Hash);
+ if (Hash_mask)
+ pr_info("Hash_mask = 0x%lx\n", Hash_mask);
+#endif
+
+ if (PHYSICAL_START > 0)
+ pr_info("physical_start = 0x%llx\n",
+ (unsigned long long)PHYSICAL_START);
+ pr_info("-----------------------------------------------------\n");
+}
+
+/*
+ * Called into from start_kernel; this initializes memblock, which is used
+ * to manage page allocation until mem_init is called.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+ *cmdline_p = boot_command_line;
+
+ /* Set a half-reasonable default so udelay does something sensible */
+ loops_per_jiffy = 500000000 / HZ;
+
+ /* Unflatten the device-tree passed by prom_init or kexec */
+ unflatten_device_tree();
+
+ /*
+ * Initialize cache line/block info from device-tree (on ppc64) or
+ * just cputable (on ppc32).
+ */
+ initialize_cache_info();
+
+ /* Initialize RTAS if available. */
+ rtas_initialize();
+
+ /* Check if we have an initrd provided via the device-tree. */
+ check_for_initrd();
+
+ /* Probe the machine type, establish ppc_md. */
+ probe_machine();
+
+ /* Setup panic notifier if requested by the platform. */
+ setup_panic();
+
+ /*
+ * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
+ * it from their respective probe() function).
+ */
+ setup_power_save();
+
+ /* Discover standard serial ports. */
+ find_legacy_serial_ports();
+
+ /* Register early console with the printk subsystem. */
+ register_early_udbg_console();
+
+ /* Setup the various CPU maps based on the device-tree. */
+ smp_setup_cpu_maps();
+
+ /* Initialize xmon. */
+ xmon_setup();
+
+ /* Check the SMT related command line arguments (ppc64). */
+ check_smt_enabled();
+
+ /* On BookE, setup per-core TLB data structures. */
+ setup_tlb_core_data();
+
+ /*
+ * Release secondary cpus out of their spinloops at 0x60 now that
+ * we can map physical -> logical CPU ids.
+ *
+ * Freescale Book3e parts spin in a loop provided by firmware,
+ * so smp_release_cpus() does nothing for them.
+ */
+#ifdef CONFIG_SMP
+ smp_release_cpus();
+#endif
+
+ /* Print various info about the machine that has been gathered so far. */
+ print_system_info();
+
+ /* Reserve large chunks of memory for use by CMA for KVM. */
+ kvm_cma_reserve();
+
+ /*
+ * Reserve any gigantic pages requested on the command line.
+ * memblock needs to have been initialized by the time this is
+ * called since this will reserve memory.
+ */
+ reserve_hugetlb_gpages();
+
+ klp_init_thread_info(&init_thread_info);
+
+ init_mm.start_code = (unsigned long)_stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = klimit;
+#ifdef CONFIG_PPC_64K_PAGES
+ init_mm.context.pte_frag = NULL;
+#endif
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ mm_iommu_init(&init_mm.context);
+#endif
+ irqstack_early_init();
+ exc_lvl_early_init();
+ emergency_stack_init();
+
+ initmem_init();
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+ if (ppc_md.setup_arch)
+ ppc_md.setup_arch();
+
+ paging_init();
+
+ /* Initialize the MMU context management stuff. */
+ mmu_context_init();
+
+#ifdef CONFIG_PPC64
+ /* Interrupt code needs to be 64K-aligned. */
+ if ((unsigned long)_stext & 0xffff)
+ panic("Kernelbase not 64K-aligned (0x%lx)!\n",
+ (unsigned long)_stext);
+#endif
+}
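
For reference, the machdep_calls entries that probe_machine() walks between __machine_desc_start and __machine_desc_end are registered by platform code through the define_machine() macro from asm/machdep.h. Below is a minimal sketch of such a registration; the platform name, compatible string, and probe/setup functions are hypothetical and only illustrate the mechanism, they are not part of this patch.

/* Hypothetical platform file, e.g. arch/powerpc/platforms/example/setup.c */
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/time.h>

static int __init example_probe(void)
{
	/* Match the (hypothetical) compatible string from the device tree. */
	return of_machine_is_compatible("example,board");
}

static void __init example_setup_arch(void)
{
	/* Board-specific setup; invoked via the ppc_md.setup_arch() call above. */
}

/* Places this machdep_calls instance in the .machine.desc section that
 * probe_machine() iterates over. */
define_machine(example) {
	.name		= "Example Board",
	.probe		= example_probe,
	.setup_arch	= example_setup_arch,
	.calibrate_decr	= generic_calibrate_decr,
};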