Diffstat:
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c  |  32
-rw-r--r--  arch/blackfin/kernel/gptimers.c      |   2
-rw-r--r--  arch/blackfin/kernel/ipipe.c         |  84
-rw-r--r--  arch/blackfin/kernel/irqchip.c       |  10
-rw-r--r--  arch/blackfin/kernel/kgdb.c          |   6
-rw-r--r--  arch/blackfin/kernel/module.c        |  45
-rw-r--r--  arch/blackfin/kernel/nmi.c           |  30
-rw-r--r--  arch/blackfin/kernel/setup.c         |  37
-rw-r--r--  arch/blackfin/kernel/time-ts.c       |  43
-rw-r--r--  arch/blackfin/kernel/time.c          |   6
-rw-r--r--  arch/blackfin/kernel/trace.c         |   7
-rw-r--r--  arch/blackfin/kernel/traps.c         |   2
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S   |   3
13 files changed, 148 insertions(+), 159 deletions(-)
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 1e485dfdc9f2..6ce8dce753c9 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -84,6 +84,24 @@ static int __init proc_dma_init(void)
late_initcall(proc_dma_init);
#endif
+static void set_dma_peripheral_map(unsigned int channel, const char *device_id)
+{
+#ifdef CONFIG_BF54x
+ unsigned int per_map;
+
+ switch (channel) {
+ case CH_UART2_RX: per_map = 0xC << 12; break;
+ case CH_UART2_TX: per_map = 0xD << 12; break;
+ case CH_UART3_RX: per_map = 0xE << 12; break;
+ case CH_UART3_TX: per_map = 0xF << 12; break;
+ default: return;
+ }
+
+ if (strncmp(device_id, "BFIN_UART", 9) == 0)
+ dma_ch[channel].regs->peripheral_map = per_map;
+#endif
+}
+
/**
* request_dma - request a DMA channel
*
@@ -111,19 +129,7 @@ int request_dma(unsigned int channel, const char *device_id)
return -EBUSY;
}
-#ifdef CONFIG_BF54x
- if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
- unsigned int per_map;
- per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
- if (strncmp(device_id, "BFIN_UART", 9) == 0)
- dma_ch[channel].regs->peripheral_map = per_map |
- ((channel - CH_UART2_RX + 0xC)<<12);
- else
- dma_ch[channel].regs->peripheral_map = per_map |
- ((channel - CH_UART2_RX + 0x6)<<12);
- }
-#endif
-
+ set_dma_peripheral_map(channel, device_id);
dma_ch[channel].device_id = device_id;
dma_ch[channel].irq = 0;
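
Note that the refactor above is not purely mechanical: the old code preserved the low 12 bits of PERIPHERAL_MAP and had an else branch assigning 0x6-based IDs to non-UART requesters, while the new helper writes the whole register and only for "BFIN_UART" requesters. A minimal user-space sketch (not kernel code) of the BF54x encoding the helper writes, where the peripheral ID occupies bits 15:12 and the CH_* values are hypothetical placeholders rather than the real channel numbers:

#include <stdio.h>

enum bf54x_uart_dma { CH_UART2_RX = 18, CH_UART2_TX, CH_UART3_RX, CH_UART3_TX };

static unsigned int uart_per_map(unsigned int channel)
{
	switch (channel) {
	case CH_UART2_RX: return 0xC << 12;
	case CH_UART2_TX: return 0xD << 12;
	case CH_UART3_RX: return 0xE << 12;
	case CH_UART3_TX: return 0xF << 12;
	default:          return 0; /* non-UART channels are left untouched */
	}
}

int main(void)
{
	unsigned int ch;
	for (ch = CH_UART2_RX; ch <= CH_UART3_TX; ch++)
		printf("channel %u -> PERIPHERAL_MAP %#06x\n", ch, uart_per_map(ch));
	return 0;
}
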
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index cdbe075de1dc..8b81dc04488a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
_disable_gptimers(mask);
for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
if (mask & (1 << i))
- group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+ group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
SSYNC();
}
EXPORT_SYMBOL(disable_gptimers);
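
The one-character gptimers fix above has real consequences: the timer status bits behave as write-1-to-clear (W1C), so the old read-modify-write (`|=`) wrote back every latched bit and silently cleared status belonging to unrelated timers. A sketch of the difference, assuming a memory-mapped W1C status register:

#include <stdint.h>

/* Clearing bits in a write-1-to-clear (W1C) status register. */
static inline void w1c_ack(volatile uint32_t *status, uint32_t mask)
{
	*status = mask;  /* correct: clears only the requested bits */
}

static inline void w1c_ack_buggy(volatile uint32_t *status, uint32_t mask)
{
	*status |= mask; /* buggy: other pending bits are read back as 1s
	                  * and written again, clearing them too */
}
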
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 3b1da4aff2a1..f37019c847c9 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -154,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
* pending for it.
*/
if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
- ipipe_head_cpudom_var(irqpend_himask) == 0)
+ !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
goto out;
__ipipe_walk_pipeline(head);
@@ -185,25 +185,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
}
EXPORT_SYMBOL(__ipipe_disable_irqdesc);
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
struct ipipe_percpu_domain_data *p;
- unsigned long flags;
+ void (*hook)(void);
int ret;
+ WARN_ON_ONCE(irqs_disabled_hw());
+
/*
- * We need to run the IRQ tail hook whenever we don't
- * propagate a syscall to higher domains, because we know that
- * important operations might be pending there (e.g. Xenomai
- * deferred rescheduling).
+ * We need to run the IRQ tail hook each time we intercept a
+ * syscall, because we know that important operations might be
+ * pending there (e.g. Xenomai deferred rescheduling).
*/
-
- if (regs->orig_p0 < NR_syscalls) {
- void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
- hook();
- if ((current->flags & PF_EVNOTIFY) == 0)
- return 0;
- }
+ hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+ hook();
/*
* This routine either returns:
@@ -214,51 +210,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
* tail work has to be performed (for handling signals etc).
*/
- if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+ if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+ !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
return 0;
ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
- flags = hard_local_irq_save();
+ hard_local_irq_disable();
- if (!__ipipe_root_domain_p) {
- hard_local_irq_restore(flags);
- return 1;
+ /*
+ * This is the end of the syscall path, so we may
+ * safely assume a valid Linux task stack here.
+ */
+ if (current->ipipe_flags & PF_EVTRET) {
+ current->ipipe_flags &= ~PF_EVTRET;
+ __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
}
- p = ipipe_root_cpudom_ptr();
- if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
- __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+ if (!__ipipe_root_domain_p)
+ ret = -1;
+ else {
+ p = ipipe_root_cpudom_ptr();
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_pipeline();
+ }
- hard_local_irq_restore(flags);
+ hard_local_irq_enable();
return -ret;
}
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
- unsigned long flags;
-
- flags = hard_local_irq_save();
-
- return flags;
-}
-
-void ipipe_critical_exit(unsigned long flags)
-{
- hard_local_irq_restore(flags);
-}
-
static void __ipipe_no_irqtail(void)
{
}
int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
- info->ncpus = num_online_cpus();
- info->cpufreq = ipipe_cpu_freq();
- info->archdep.tmirq = IPIPE_TIMER_IRQ;
- info->archdep.tmfreq = info->cpufreq;
+ info->sys_nr_cpus = num_online_cpus();
+ info->sys_cpu_freq = ipipe_cpu_freq();
+ info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+ info->sys_hrtimer_freq = __ipipe_core_clock;
+ info->sys_hrclock_freq = __ipipe_core_clock;
return 0;
}
@@ -289,6 +281,7 @@ int ipipe_trigger_irq(unsigned irq)
asmlinkage void __ipipe_sync_root(void)
{
void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+ struct ipipe_percpu_domain_data *p;
unsigned long flags;
BUG_ON(irqs_disabled());
@@ -300,19 +293,20 @@ asmlinkage void __ipipe_sync_root(void)
clear_thread_flag(TIF_IRQ_SYNC);
- if (ipipe_root_cpudom_var(irqpend_himask) != 0)
- __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+ p = ipipe_root_cpudom_ptr();
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_pipeline();
hard_local_irq_restore(flags);
}
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
{
if (__ipipe_root_domain_p &&
test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
return;
- __ipipe_sync_stage(syncmask);
+ __ipipe_sync_stage();
}
void __ipipe_disable_root_irqs_hw(void)
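
Throughout this file the I-pipe tail hook is stored as an unsigned long and cast back to a function pointer before the call; when no co-kernel (e.g. Xenomai) has installed one, it is expected to point at the empty __ipipe_no_irqtail() kept above. A runnable user-space sketch of that cast-and-call pattern, with hypothetical names standing in for the kernel symbols:

#include <stdio.h>

static unsigned long tail_hook; /* stands in for __ipipe_irq_tail_hook */

static void no_irqtail(void) { } /* stands in for __ipipe_no_irqtail */

static void run_tail_hook(void)
{
	void (*hook)(void) = (void (*)(void))tail_hook;
	hook();
}

int main(void)
{
	tail_hook = (unsigned long)no_irqtail; /* default: do nothing */
	run_tail_hook();
	puts("tail hook ran");
	return 0;
}
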
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 64cff54a8a58..1696d34f51c2 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -39,21 +39,23 @@ int show_interrupts(struct seq_file *p, void *v)
unsigned long flags;
if (i < NR_IRQS) {
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
+ struct irq_desc *desc = irq_to_desc(i);
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
- seq_printf(p, " %8s", irq_desc[i].chip->name);
+ seq_printf(p, " %8s", irq_desc_get_chip(desc)->name);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, " %s", action->name);
seq_putc(p, '\n');
skip:
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
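
The irqchip change swaps direct irq_desc[] array indexing for the irq_to_desc()/irq_desc_get_chip() accessors, which keep working once descriptors are allocated dynamically (SPARSE_IRQ) and a given IRQ number may have no descriptor at all. A hedged kernel-style sketch of the defensive iteration pattern this enables:

#include <linux/irq.h>
#include <linux/seq_file.h>

/* Sketch only: walk descriptors via the accessor, tolerating holes. */
static void show_chips(struct seq_file *p)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_to_desc(i);

		if (!desc)
			continue; /* no descriptor allocated for this IRQ */
		seq_printf(p, "%3d: %s\n", i, irq_desc_get_chip(desc)->name);
	}
}
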
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index eb92592fd80c..9b80b152435e 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -181,7 +181,7 @@ static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
return -ENOSPC;
}
- /* Becasue hardware data watchpoint impelemented in current
+ /* Because hardware data watchpoint implemented in current
* Blackfin cannot trigger an exception event as the hardware
* instruction watchpoint does, we ignore all data watch points here.
* They can be turned on easily after future blackfin design
@@ -422,11 +422,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = {0xa1},
-#ifdef CONFIG_SMP
- .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
-#else
.flags = KGDB_HW_BREAKPOINT,
-#endif
.set_hw_breakpoint = bfin_set_hw_break,
.remove_hw_breakpoint = bfin_remove_hw_break,
.disable_hw_break = bfin_disable_hw_debug,
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index a6dfa6b71e63..35e350cad9d9 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -4,7 +4,7 @@
* Licensed under the GPL-2 or later
*/
-#define pr_fmt(fmt) "module %s: " fmt
+#define pr_fmt(fmt) "module %s: " fmt, mod->name
#include <linux/moduleloader.h>
#include <linux/elf.h>
@@ -57,8 +57,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_inst_sram_alloc(s->sh_size);
mod->arch.text_l1 = dest;
if (dest == NULL) {
- pr_err("L1 inst memory allocation failed\n",
- mod->name);
+ pr_err("L1 inst memory allocation failed\n");
return -1;
}
dma_memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -70,8 +69,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_sram_alloc(s->sh_size);
mod->arch.data_a_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n",
- mod->name);
+ pr_err("L1 data memory allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -83,8 +81,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_sram_zalloc(s->sh_size);
mod->arch.bss_a_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n",
- mod->name);
+ pr_err("L1 data memory allocation failed\n");
return -1;
}
@@ -93,8 +90,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.data_b_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n",
- mod->name);
+ pr_err("L1 data memory allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -104,8 +100,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.bss_b_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n",
- mod->name);
+ pr_err("L1 data memory allocation failed\n");
return -1;
}
memset(dest, 0, s->sh_size);
@@ -117,8 +112,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_alloc(s->sh_size);
mod->arch.text_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n",
- mod->name);
+ pr_err("L2 SRAM allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -130,8 +124,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_alloc(s->sh_size);
mod->arch.data_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n",
- mod->name);
+ pr_err("L2 SRAM allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -143,8 +136,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_zalloc(s->sh_size);
mod->arch.bss_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n",
- mod->name);
+ pr_err("L2 SRAM allocation failed\n");
return -1;
}
@@ -160,9 +152,9 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
int
apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec, struct module *me)
+ unsigned int symindex, unsigned int relsec, struct module *mod)
{
- pr_err(".rel unsupported\n", me->name);
+ pr_err(".rel unsupported\n");
return -ENOEXEC;
}
@@ -186,7 +178,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf32_Sym *sym;
unsigned long location, value, size;
- pr_debug("applying relocate section %u to %u\n", mod->name,
+ pr_debug("applying relocate section %u to %u\n",
relsec, sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
@@ -203,14 +195,14 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
#ifdef CONFIG_SMP
if (location >= COREB_L1_DATA_A_START) {
- pr_err("cannot relocate in L1: %u (SMP kernel)",
- mod->name, ELF32_R_TYPE(rel[i].r_info));
+ pr_err("cannot relocate in L1: %u (SMP kernel)\n",
+ ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
#endif
pr_debug("location is %lx, value is %lx type is %d\n",
- mod->name, location, value, ELF32_R_TYPE(rel[i].r_info));
+ location, value, ELF32_R_TYPE(rel[i].r_info));
switch (ELF32_R_TYPE(rel[i].r_info)) {
@@ -230,11 +222,11 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
case R_BFIN_PCREL12_JUMP_S:
case R_BFIN_PCREL10:
pr_err("unsupported relocation: %u (no -mlong-calls?)\n",
- mod->name, ELF32_R_TYPE(rel[i].r_info));
+ ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
default:
- pr_err("unknown relocation: %u\n", mod->name,
+ pr_err("unknown relocation: %u\n",
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
@@ -251,8 +243,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
isram_memcpy((void *)location, &value, size);
break;
default:
- pr_err("invalid relocation for %#lx\n",
- mod->name, location);
+ pr_err("invalid relocation for %#lx\n", location);
return -ENOEXEC;
}
}
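
The pr_fmt redefinition at the top of this file is what makes all of the deletions above safe: the macro now expands to a format string plus mod->name as an implicit first argument. It also explains the me→mod parameter rename in apply_relocate(), since the trick only compiles where a variable named mod is in scope. A runnable user-space reduction of the mechanism:

#include <stdio.h>

struct module { const char *name; };

/* The macro injects mod->name as the first printf argument, so every
 * call site must have a local or parameter named "mod". */
#define pr_fmt(fmt) "module %s: " fmt, mod->name
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

static int fake_load(struct module *mod)
{
	pr_err("L1 inst memory allocation failed\n");
	return -1;
}

int main(void)
{
	struct module m = { .name = "demo" };
	fake_load(&m); /* prints: module demo: L1 inst memory allocation failed */
	return 0;
}
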
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
index 0b5f72f17fd0..401eb1d8e3b4 100644
--- a/arch/blackfin/kernel/nmi.c
+++ b/arch/blackfin/kernel/nmi.c
@@ -12,7 +12,7 @@
#include <linux/bitops.h>
#include <linux/hardirq.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <linux/pm.h>
#include <linux/nmi.h>
#include <linux/smp.h>
@@ -196,43 +196,31 @@ void touch_nmi_watchdog(void)
/* Suspend/resume support */
#ifdef CONFIG_PM
-static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state)
+static int nmi_wdt_suspend(void)
{
nmi_wdt_stop();
return 0;
}
-static int nmi_wdt_resume(struct sys_device *dev)
+static void nmi_wdt_resume(void)
{
if (nmi_active)
nmi_wdt_start();
- return 0;
}
-static struct sysdev_class nmi_sysclass = {
- .name = DRV_NAME,
+static struct syscore_ops nmi_syscore_ops = {
.resume = nmi_wdt_resume,
.suspend = nmi_wdt_suspend,
};
-static struct sys_device device_nmi_wdt = {
- .id = 0,
- .cls = &nmi_sysclass,
-};
-
-static int __init init_nmi_wdt_sysfs(void)
+static int __init init_nmi_wdt_syscore(void)
{
- int error;
-
- if (!nmi_active)
- return 0;
+ if (nmi_active)
+ register_syscore_ops(&nmi_syscore_ops);
- error = sysdev_class_register(&nmi_sysclass);
- if (!error)
- error = sysdev_register(&device_nmi_wdt);
- return error;
+ return 0;
}
-late_initcall(init_nmi_wdt_sysfs);
+late_initcall(init_nmi_wdt_syscore);
#endif /* CONFIG_PM */
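
The sysdev→syscore conversion above follows the generic pattern: syscore callbacks take no device argument, suspend returns int, resume returns void, and registration is a single call with no class/device pair to create. A minimal sketch under those rules, with foo_* as placeholder names:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int foo_suspend(void)
{
	/* quiesce the hardware; 0 means success */
	return 0;
}

static void foo_resume(void)
{
	/* re-arm the hardware; resume cannot fail */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static int __init foo_init(void)
{
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}
late_initcall(foo_init);
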
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ac71dc15cbdb..805c6132c779 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -215,11 +215,48 @@ void __init bfin_relocate_l1_mem(void)
early_dma_memcpy_done();
+#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
+ blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
+#endif
+
/* if necessary, copy L2 text/data to L2 SRAM */
if (L2_LENGTH && l2_len)
memcpy(_stext_l2, _l2_lma, l2_len);
}
+#ifdef CONFIG_SMP
+void __init bfin_relocate_coreb_l1_mem(void)
+{
+ unsigned long text_l1_len = (unsigned long)_text_l1_len;
+ unsigned long data_l1_len = (unsigned long)_data_l1_len;
+ unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+
+ blackfin_dma_early_init();
+
+ /* if necessary, copy L1 text to L1 instruction SRAM */
+ if (L1_CODE_LENGTH && text_l1_len)
+ early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
+ text_l1_len);
+
+ /* if necessary, copy L1 data to L1 data bank A SRAM */
+ if (L1_DATA_A_LENGTH && data_l1_len)
+ early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
+ data_l1_len);
+
+ /* if necessary, copy L1 data B to L1 data bank B SRAM */
+ if (L1_DATA_B_LENGTH && data_b_l1_len)
+ early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
+ data_b_l1_len);
+
+ early_dma_memcpy_done();
+
+#ifdef CONFIG_ICACHE_FLUSH_L1
+ blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
+ (unsigned long)_stext_l1 + COREB_L1_CODE_START;
+#endif
+}
+#endif
+
#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
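
The CONFIG_ICACHE_FLUSH_L1 arithmetic in the new CoreB path rebases a CoreA link-time address into CoreB's instruction SRAM: entry_on_coreb = func - _stext_l1 + COREB_L1_CODE_START. A user-space sketch with illustrative BF561-style addresses; the real values come from the linker script and the part's memory map:

#include <stdio.h>
#include <stdint.h>

#define STEXT_L1            0xFFA00000u /* illustrative CoreA L1 code base */
#define COREB_L1_CODE_START 0xFF600000u /* illustrative CoreB L1 code base */

int main(void)
{
	uint32_t corea_fn = 0xFFA00120u; /* hypothetical CoreA flush-routine address */
	uint32_t coreb_fn = corea_fn - STEXT_L1 + COREB_L1_CODE_START;
	printf("CoreB entry: %#x\n", coreb_fn);
	return 0;
}
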
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 8c9a43daf80f..9e9b60d969dc 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -23,29 +23,6 @@
#include <asm/gptimers.h>
#include <asm/nmi.h>
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
- *
- * Then we use scaling math (suggested by george@mvista.com) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
- *
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
- *
- * We can use khz divisor instead of mhz to keep a better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- * -johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
@@ -63,7 +40,6 @@ static struct clocksource bfin_cs_cycles = {
.rating = 400,
.read = bfin_read_cycles,
.mask = CLOCKSOURCE_MASK(64),
- .shift = CYC2NS_SCALE_FACTOR,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -75,10 +51,7 @@ static inline unsigned long long bfin_cs_cycles_sched_clock(void)
static int __init bfin_cs_cycles_init(void)
{
- bfin_cs_cycles.mult = \
- clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);
-
- if (clocksource_register(&bfin_cs_cycles))
+ if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
panic("failed to register clocksource");
return 0;
@@ -111,7 +84,6 @@ static struct clocksource bfin_cs_gptimer0 = {
.rating = 350,
.read = bfin_read_gptimer0,
.mask = CLOCKSOURCE_MASK(32),
- .shift = CYC2NS_SCALE_FACTOR,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -125,10 +97,7 @@ static int __init bfin_cs_gptimer0_init(void)
{
setup_gptimer0();
- bfin_cs_gptimer0.mult = \
- clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);
-
- if (clocksource_register(&bfin_cs_gptimer0))
+ if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk()))
panic("failed to register clocksource");
return 0;
@@ -206,8 +175,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
smp_mb();
- evt->event_handler(evt);
+ /*
+ * We want to ACK before we handle so that we can handle smaller timer
+ * intervals. This way if the timer expires again while we're handling
+ * things, we're more likely to see that 2nd int rather than swallowing
+ * it by ACKing the int at the end of this handler.
+ */
bfin_gptmr0_ack();
+ evt->event_handler(evt);
return IRQ_HANDLED;
}
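
Both clocksource conversions in this file delete the hand-computed mult/shift (along with the cyc2ns derivation comment that justified CYC2NS_SCALE_FACTOR) because clocksource_register_hz() derives the pair from the input frequency internally. A hedged sketch of the resulting registration pattern; my_* names are placeholders and get_cclk() is the Blackfin core-clock helper used by the patch:

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/kernel.h>

static cycle_t my_read(struct clocksource *cs)
{
	return 0; /* return the raw 64-bit counter value here */
}

static struct clocksource my_cs = {
	.name   = "my-counter",
	.rating = 400,
	.read   = my_read,
	.mask   = CLOCKSOURCE_MASK(64),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init my_cs_init(void)
{
	/* mult/shift are computed by the core from the frequency */
	if (clocksource_register_hz(&my_cs, get_cclk()))
		panic("failed to register clocksource");
	return 0;
}
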
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index c9113619029f..8d73724c0092 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void)
/*
* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
*/
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dummy)
{
- write_seqlock(&xtime_lock);
- do_timer(1);
- write_sequnlock(&xtime_lock);
+ xtime_update(1);
#ifdef CONFIG_IPIPE
update_root_process_times(get_irq_regs());
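
The timer-tick conversion is purely mechanical: xtime_update(), added to core code in this era, wraps the exact sequence deleted here, so architectures no longer touch xtime_lock directly. Paraphrased from the core implementation, roughly:

void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}
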
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index 05b550891ce5..050db44fe919 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -912,10 +912,11 @@ void show_regs(struct pt_regs *fp)
/* if no interrupts are going off, don't print this out */
if (fp->ipend & ~0x3F) {
for (i = 0; i < (NR_IRQS - 1); i++) {
+ struct irq_desc *desc = irq_to_desc(i);
if (!in_atomic)
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
- action = irq_desc[i].action;
+ action = desc->action;
if (!action)
goto unlock;
@@ -928,7 +929,7 @@ void show_regs(struct pt_regs *fp)
pr_cont("\n");
unlock:
if (!in_atomic)
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 59c1df75e4de..655f25d139a7 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -98,7 +98,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
/* send the appropriate signal to the user program */
switch (trapnr) {
- /* This table works in conjuction with the one in ./mach-common/entry.S
+ /* This table works in conjunction with the one in ./mach-common/entry.S
* Some exceptions are handled there (in assembly, in exception space)
* Some are handled here, (in C, in interrupt space)
* Some, like CPLB, are handled in both, where the normal path is
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 4122678529c0..854fa49f1c3e 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
. = ALIGN(16);
INIT_DATA_SECTION(16)
- PERCPU(4)
+ PERCPU(32, PAGE_SIZE)
.exit.data :
{
@@ -176,6 +176,7 @@ SECTIONS
{
. = ALIGN(4);
__stext_l1 = .;
+ *(.l1.text.head)
*(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
SCHED_TEXT