Diffstat (limited to 'arch/metag/kernel')
-rw-r--r--  arch/metag/kernel/cachepart.c          13
-rw-r--r--  arch/metag/kernel/clock.c              59
-rw-r--r--  arch/metag/kernel/irq.c                 7
-rw-r--r--  arch/metag/kernel/kick.c                9
-rw-r--r--  arch/metag/kernel/metag_ksyms.c         5
-rw-r--r--  arch/metag/kernel/perf/perf_event.c     6
-rw-r--r--  arch/metag/kernel/setup.c              16
-rw-r--r--  arch/metag/kernel/smp.c                57
-rw-r--r--  arch/metag/kernel/time.c               14
-rw-r--r--  arch/metag/kernel/traps.c               7
10 files changed, 132 insertions, 61 deletions
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
index 954548b1bea8..0a2385fa2a1d 100644
--- a/arch/metag/kernel/cachepart.c
+++ b/arch/metag/kernel/cachepart.c
@@ -100,22 +100,23 @@ void check_for_cache_aliasing(int thread_id)
thread_cache_size =
get_thread_cache_size(cache_type, thread_id);
if (thread_cache_size < 0)
- pr_emerg("Can't read %s cache size", \
+ pr_emerg("Can't read %s cache size\n",
cache_type ? "DCACHE" : "ICACHE");
else if (thread_cache_size == 0)
/* Cache is off. No need to check for aliasing */
continue;
if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) {
- pr_emerg("Cache aliasing detected in %s on Thread %d",
+ pr_emerg("Potential cache aliasing detected in %s on Thread %d\n",
cache_type ? "DCACHE" : "ICACHE", thread_id);
- pr_warn("Total %s size: %u bytes",
- cache_type ? "DCACHE" : "ICACHE ",
+ pr_warn("Total %s size: %u bytes\n",
+ cache_type ? "DCACHE" : "ICACHE",
cache_type ? get_dcache_size()
: get_icache_size());
- pr_warn("Thread %s size: %d bytes",
+ pr_warn("Thread %s size: %d bytes\n",
cache_type ? "CACHE" : "ICACHE",
thread_cache_size);
- pr_warn("Page Size: %lu bytes", PAGE_SIZE);
+ pr_warn("Page Size: %lu bytes\n", PAGE_SIZE);
+ panic("Potential cache aliasing detected");
}
}
}
diff --git a/arch/metag/kernel/clock.c b/arch/metag/kernel/clock.c
index defc84056f18..6339c9c6d0ab 100644
--- a/arch/metag/kernel/clock.c
+++ b/arch/metag/kernel/clock.c
@@ -8,8 +8,10 @@
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <asm/param.h>
#include <asm/clock.h>
@@ -34,8 +36,63 @@ static unsigned long get_core_freq_default(void)
#endif
}
+static struct clk *clk_core;
+
+/* Clk based get_core_freq callback. */
+static unsigned long get_core_freq_clk(void)
+{
+ return clk_get_rate(clk_core);
+}
+
+/**
+ * init_metag_core_clock() - Set up core clock from devicetree.
+ *
+ * Checks to see if a "core" clock is provided in the device tree, and overrides
+ * the get_core_freq callback to use it.
+ */
+static void __init init_metag_core_clock(void)
+{
+ /*
+ * See if a core clock is provided by the devicetree (and
+ * registered by the init callback above).
+ */
+ struct device_node *node;
+ node = of_find_compatible_node(NULL, NULL, "img,meta");
+ if (!node) {
+ pr_warn("%s: no compatible img,meta DT node found\n",
+ __func__);
+ return;
+ }
+
+ clk_core = of_clk_get_by_name(node, "core");
+ if (IS_ERR(clk_core)) {
+ pr_warn("%s: no core clock found in DT\n",
+ __func__);
+ return;
+ }
+
+ /*
+ * Override the core frequency callback to use
+ * this clk.
+ */
+ _meta_clock.get_core_freq = get_core_freq_clk;
+}
+
+/**
+ * init_metag_clocks() - Set up clocks from devicetree.
+ *
+ * Set up important clocks from device tree. In particular any needed for clock
+ * sources.
+ */
+void __init init_metag_clocks(void)
+{
+ init_metag_core_clock();
+
+ pr_info("Core clock frequency: %lu Hz\n", get_coreclock());
+}
+
/**
- * setup_meta_clocks() - Set up the Meta clock.
+ * setup_meta_clocks() - Early set up of the Meta clock.
* @desc: Clock descriptor usually provided by machine description
*
* Ensures all callbacks are valid.
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
index 87707efeb0a3..2a2c9d55187e 100644
--- a/arch/metag/kernel/irq.c
+++ b/arch/metag/kernel/irq.c
@@ -25,7 +25,7 @@ static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
-struct irq_domain *root_domain;
+static struct irq_domain *root_domain;
static unsigned int startup_meta_irq(struct irq_data *data)
{
@@ -279,11 +279,12 @@ static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_chip *chip = irq_data_get_irq_chip(data);
+ unsigned long flags;
- raw_spin_lock_irq(&desc->lock);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (chip->irq_set_affinity)
chip->irq_set_affinity(data, cpumask_of(cpu), false);
- raw_spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/*
diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c
index 50fcbec98cd2..beb377621322 100644
--- a/arch/metag/kernel/kick.c
+++ b/arch/metag/kernel/kick.c
@@ -26,6 +26,8 @@
* pass it as an argument.
*/
#include <linux/export.h>
+#include <linux/hardirq.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>
@@ -66,6 +68,7 @@ EXPORT_SYMBOL(kick_unregister_func);
TBIRES
kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
{
+ struct pt_regs *old_regs;
struct kick_irq_handler *kh;
struct list_head *lh;
int handled = 0;
@@ -79,6 +82,9 @@ kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
trace_hardirqs_off();
+ old_regs = set_irq_regs((struct pt_regs *)State.Sig.pCtx);
+ irq_enter();
+
/*
* There is no need to disable interrupts here because we
* can't nest KICK interrupts in a KICK interrupt handler.
@@ -97,5 +103,8 @@ kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
WARN_ON(!handled);
+ irq_exit();
+ set_irq_regs(old_regs);
+
return tail_end(ret);
}
diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c
index ec872ef14eb1..215c94ad63ac 100644
--- a/arch/metag/kernel/metag_ksyms.c
+++ b/arch/metag/kernel/metag_ksyms.c
@@ -1,5 +1,7 @@
#include <linux/export.h>
+#include <linux/types.h>
+#include <asm/checksum.h>
#include <asm/div64.h>
#include <asm/ftrace.h>
#include <asm/page.h>
@@ -15,6 +17,9 @@ EXPORT_SYMBOL(max_pfn);
EXPORT_SYMBOL(min_low_pfn);
#endif
+/* Network checksum functions */
+EXPORT_SYMBOL(csum_partial);
+
/* TBI symbols */
EXPORT_SYMBOL(__TBI);
EXPORT_SYMBOL(__TBIFindSeg);
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 5b18888ee364..5cc4d4dcf3cf 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -813,8 +813,8 @@ static struct metag_pmu _metag_pmu = {
};
/* PMU CPU hotplug notifier */
-static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
- unsigned long action, void *hcpu)
+static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (unsigned int)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -828,7 +828,7 @@ static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
return NOTIFY_OK;
}
-static struct notifier_block __cpuinitdata metag_pmu_notifier = {
+static struct notifier_block metag_pmu_notifier = {
.notifier_call = metag_pmu_cpu_notify,
};
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
index 4f5726f1a55b..c396cd0b425f 100644
--- a/arch/metag/kernel/setup.c
+++ b/arch/metag/kernel/setup.c
@@ -20,6 +20,7 @@
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/sched.h>
@@ -424,6 +425,9 @@ static int __init customize_machine(void)
/* customizes platform devices, or adds new ones */
if (machine_desc->init_machine)
machine_desc->init_machine();
+ else
+ of_platform_populate(NULL, of_default_bus_match_table, NULL,
+ NULL);
return 0;
}
arch_initcall(customize_machine);
@@ -587,20 +591,20 @@ PTBI pTBI_get(unsigned int cpu)
EXPORT_SYMBOL(pTBI_get);
#if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
-char capabilites[] = "dsp fpu";
+static char capabilities[] = "dsp fpu";
#elif defined(CONFIG_METAG_DSP)
-char capabilites[] = "dsp";
+static char capabilities[] = "dsp";
#elif defined(CONFIG_METAG_FPU)
-char capabilites[] = "fpu";
+static char capabilities[] = "fpu";
#else
-char capabilites[] = "";
+static char capabilities[] = "";
#endif
static struct ctl_table caps_kern_table[] = {
{
.procname = "capabilities",
- .data = capabilites,
- .maxlen = sizeof(capabilites),
+ .data = capabilities,
+ .maxlen = sizeof(capabilities),
.mode = 0444,
.proc_handler = proc_dostring,
},
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index f443ec9a7cbe..7c0113142981 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/atomic.h>
+#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -62,10 +63,12 @@ static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
static DEFINE_SPINLOCK(boot_lock);
+static DECLARE_COMPLETION(cpu_running);
+
/*
* "thread" is assumed to be a valid Meta hardware thread ID.
*/
-int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
+int boot_secondary(unsigned int thread, struct task_struct *idle)
{
u32 val;
@@ -115,11 +118,9 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
* If the cache partition has changed, prints a message to the log describing
* those changes.
*/
-static __cpuinit void describe_cachepart_change(unsigned int thread,
- const char *label,
- unsigned int sz,
- unsigned int old,
- unsigned int new)
+static void describe_cachepart_change(unsigned int thread, const char *label,
+ unsigned int sz, unsigned int old,
+ unsigned int new)
{
unsigned int lor1, land1, gor1, gand1;
unsigned int lor2, land2, gor2, gand2;
@@ -167,7 +168,7 @@ static __cpuinit void describe_cachepart_change(unsigned int thread,
* Ensures that coherency is enabled and that the threads share the same cache
* partitions.
*/
-static __cpuinit void setup_smp_cache(unsigned int thread)
+static void setup_smp_cache(unsigned int thread)
{
unsigned int this_thread, lflags;
unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
@@ -212,7 +213,7 @@ static __cpuinit void setup_smp_cache(unsigned int thread)
icpart_old, icpart_new);
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
unsigned int thread = cpu_2_hwthread_id[cpu];
int ret;
@@ -235,20 +236,12 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
*/
ret = boot_secondary(thread, idle);
if (ret == 0) {
- unsigned long timeout;
-
/*
* CPU was successfully started, wait for it
* to come online or time out.
*/
- timeout = jiffies + HZ;
- while (time_before(jiffies, timeout)) {
- if (cpu_online(cpu))
- break;
-
- udelay(10);
- barrier();
- }
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
if (!cpu_online(cpu))
ret = -EIO;
@@ -273,10 +266,9 @@ static DECLARE_COMPLETION(cpu_killed);
/*
* __cpu_disable runs on the processor to be shutdown.
*/
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
- struct task_struct *p;
/*
* Take this CPU offline. Once we clear this, we can't return,
@@ -296,12 +288,7 @@ int __cpuexit __cpu_disable(void)
flush_cache_all();
local_flush_tlb_all();
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (p->mm)
- cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
- }
- read_unlock(&tasklist_lock);
+ clear_tasks_mm_cpumask(cpu);
return 0;
}
@@ -310,7 +297,7 @@ int __cpuexit __cpu_disable(void)
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
{
if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
pr_err("CPU%u: unable to kill\n", cpu);
@@ -322,7 +309,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
* Note that we do not return from this function. If this cpu is
* brought online again it will need to run secondary_startup().
*/
-void __cpuexit cpu_die(void)
+void cpu_die(void)
{
local_irq_disable();
idle_task_exit();
@@ -337,7 +324,7 @@ void __cpuexit cpu_die(void)
* Called by both boot and secondaries to move global data into
* per-processor storage.
*/
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+void smp_store_cpu_info(unsigned int cpuid)
{
struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
@@ -385,12 +372,7 @@ asmlinkage void secondary_start_kernel(void)
setup_priv();
- /*
- * Enable local interrupts.
- */
- tbi_startup_interrupt(TBID_SIGNUM_TRT);
notify_cpu_starting(cpu);
- local_irq_enable();
pr_info("CPU%u (thread %u): Booted secondary processor\n",
cpu, cpu_2_hwthread_id[cpu]);
@@ -402,12 +384,13 @@ asmlinkage void secondary_start_kernel(void)
* OK, now it's safe to let the boot CPU continue
*/
set_cpu_online(cpu, true);
+ complete(&cpu_running);
/*
- * Check for cache aliasing.
- * Preemption is disabled
+ * Enable local interrupts.
*/
- check_for_cache_aliasing(cpu);
+ tbi_startup_interrupt(TBID_SIGNUM_TRT);
+ local_irq_enable();
/*
* OK, it's off to the idle thread for us
diff --git a/arch/metag/kernel/time.c b/arch/metag/kernel/time.c
index 17dc10733b2f..f1c8c53dace7 100644
--- a/arch/metag/kernel/time.c
+++ b/arch/metag/kernel/time.c
@@ -5,11 +5,21 @@
*
*/
-#include <linux/init.h>
-
#include <clocksource/metag_generic.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <asm/clock.h>
void __init time_init(void)
{
+#ifdef CONFIG_COMMON_CLK
+ /* Init clocks from device tree */
+ of_clk_init(NULL);
+#endif
+
+ /* Init meta clocks, particularly the core clock */
+ init_metag_clocks();
+
+ /* Set up the timer clock sources */
metag_generic_timer_init();
}
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 2ceeaae5b199..25f9d1c2ffec 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -33,6 +33,7 @@
#include <asm/siginfo.h>
#include <asm/traps.h>
#include <asm/hwthread.h>
+#include <asm/setup.h>
#include <asm/switch.h>
#include <asm/user_gateway.h>
#include <asm/syscall.h>
@@ -87,8 +88,8 @@ const char *trap_name(int trapno)
static DEFINE_SPINLOCK(die_lock);
-void die(const char *str, struct pt_regs *regs, long err,
- unsigned long addr)
+void __noreturn die(const char *str, struct pt_regs *regs,
+ long err, unsigned long addr)
{
static int die_counter;
@@ -811,7 +812,7 @@ static void set_trigger_mask(unsigned int mask)
}
#endif
-void __cpuinit per_cpu_trap_init(unsigned long cpu)
+void per_cpu_trap_init(unsigned long cpu)
{
TBIRES int_context;
unsigned int thread = cpu_2_hwthread_id[cpu];