path: root/arch/sparc64/kernel/smp.c
author    David S. Miller <davem@sunset.davemloft.net>  2007-03-05 15:28:37 -0800
committer David S. Miller <davem@sunset.davemloft.net>  2007-04-26 01:54:15 -0700
commit    112f48716d9f292c92a033cff9e3ce7405ed4280 (patch)
tree      5be2952ca83adb519df5995c10e0686447fea88f /arch/sparc64/kernel/smp.c
parent    [SPARC64]: Add tick_nohz_{stop,restart}_sched_tick() calls to cpu_idle(). (diff)
[SPARC64]: Add clocksource/clockevents support.
I'd like to thank John Stul and others for helping me along the way.

A lot of cleanups fell out of this.  For example, the get_compare() tick_op was totally unused, so it was deleted.  And the most often used tick_op members were grouped together for cache-friendliness.

The sparc64 TSC is given to the kernel as a one-shot timer.

tick_ops->init_timer() simply turns off the privileged bit in the tick register (when possible), and disables the interrupt by setting bit 63 in the compare register.  The ->disable_irq() op also sets this bit.

tick_ops->add_compare() is changed to:

1) Add the given delta to "tick", not to "compare".
2) Return a boolean which, if true, means that the tick value read after writing the compare value was found to have incremented past the initial tick value.  This mirrors logic used in the HPET driver's ->next_event() method.

Each tick_ops implementation also now provides a name string, and we feed this into the clocksource and clockevents layers.

Signed-off-by: David S. Miller <davem@davemloft.net>
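To illustrate the ->add_compare() contract described above, here is a small standalone C model.  It is a sketch, not the kernel code: plain variables stand in for the sparc64 %tick and compare registers, and the helper names are illustrative rather than the ones used in arch/sparc64/kernel/time.c.  What it demonstrates is the HPET-style race check: after programming the compare value, the tick counter is re-read, and a nonzero return tells the caller that the requested event time may already have passed.

/* Standalone model of the ->add_compare() behaviour described above.
 * This is a sketch, not kernel code: fake_tick stands in for the
 * free-running %tick register and fake_compare for the compare
 * register, and the function names are illustrative only.
 */
#include <stdio.h>

static unsigned long fake_tick;     /* stand-in for %tick           */
static unsigned long fake_compare;  /* stand-in for the compare reg */

static unsigned long read_tick(void)
{
	/* The real register free-runs; emulate that with an increment. */
	return fake_tick += 3;
}

static void write_compare(unsigned long val)
{
	fake_compare = val;
}

/* Add the delta to the current tick value, program the compare
 * register, then re-read the tick counter.  A nonzero return means
 * the counter has already raced past the value just programmed, so
 * the event may have been missed and the caller should retry with a
 * larger delta (the same check the HPET ->next_event() method uses).
 */
static int add_compare(unsigned long adj)
{
	unsigned long orig_tick = read_tick();
	unsigned long new_compare = orig_tick + adj;

	write_compare(new_compare);

	/* Signed difference so counter wraparound compares sanely. */
	return (long)(read_tick() - new_compare) > 0L;
}

int main(void)
{
	printf("large delta missed: %d\n", add_compare(1000)); /* expect 0 */
	printf("tiny delta missed:  %d\n", add_compare(1));    /* expect 1 */
	return 0;
}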
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c | 29
1 file changed, 3 insertions, 26 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 39deb0346eb5..d4f0a70f4845 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -123,7 +123,7 @@ void __init smp_store_cpu_info(int id)
cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
@@ -138,7 +138,7 @@ void __init smp_callin(void)
__flush_tlb_all();
- smp_setup_percpu_timer();
+ setup_sparc64_timer();
if (cheetah_pcache_forced_on)
cheetah_enable_pcache();
@@ -175,8 +175,6 @@ void cpu_panic(void)
panic("SMP bolixed\n");
}
-static unsigned long current_tick_offset __read_mostly;
-
/* This tick register synchronization scheme is taken entirely from
* the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
*
@@ -259,7 +257,7 @@ void smp_synchronize_tick_client(void)
} else
adj = -delta;
- tick_ops->add_tick(adj, current_tick_offset);
+ tick_ops->add_tick(adj);
}
#if DEBUG_TICK_SYNC
t[i].rt = rt;
@@ -1178,30 +1176,9 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
preempt_enable();
}
-static void __init smp_setup_percpu_timer(void)
-{
- unsigned long pstate;
-
- /* Guarantee that the following sequences execute
- * uninterrupted.
- */
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate"
- : "=r" (pstate)
- : "i" (PSTATE_IE));
-
- tick_ops->init_tick(current_tick_offset);
-
- /* Restore PSTATE_IE. */
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
- : /* no outputs */
- : "r" (pstate));
-}
-
void __init smp_tick_init(void)
{
boot_cpu_id = hard_smp_processor_id();
- current_tick_offset = timer_tick_offset;
}
/* /proc/profile writes can call this, don't __init it please. */