From d7b906897e9caae452947e33674df0a2d6f7e10f Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 17 Apr 2008 07:46:24 +0200
Subject: [S390] genirq/clockevents: move irq affinity prototypes/inlines to interrupt.h

> Generic code is not supposed to include irq.h. Replace this include
> with linux/hardirq.h instead and add/replace an include of linux/irq.h
> in asm header files where necessary.
> This change should only matter for architectures that make use of
> GENERIC_CLOCKEVENTS.
> Architectures in question are mips, x86, arm, sh, powerpc, uml and sparc64.
>
> I did some cross compile tests for mips, x86_64, arm, powerpc and sparc64.
> This patch also fixes build breakages caused by the include replacement in
> tick-common.h.

I generally dislike adding optional linux/* includes to asm/* headers -
I'm nervous about this causing include loops.

However, there's a separate point to be discussed here: what interfaces
are expected of every architecture in the kernel. If generic code wants
to be able to set the affinity of interrupts, then that needs to become
part of the interfaces listed in linux/interrupt.h rather than
linux/irq.h.

So what I suggest is this approach instead (against Linus' tree of a
couple of days ago): we move irq_set_affinity() and
irq_can_set_affinity() to linux/interrupt.h, change the linux/irq.h
includes to linux/interrupt.h and include asm/irq_regs.h where needed
(asm/irq_regs.h is supposed to be a rarely used include, since not much
touches the stacked parent context registers).

Build tested on ARM PXA family kernels and ARM's Realview platform
kernels, which both use genirq.

[ tglx@linutronix.de: add GENERIC_HARDIRQ dependencies ]

Signed-off-by: Russell King
Signed-off-by: Thomas Gleixner
Signed-off-by: Martin Schwidefsky
Signed-off-by: Heiko Carstens
---
 kernel/time/tick-broadcast.c | 2 +-
 kernel/time/tick-common.c    | 4 +++-
 kernel/time/tick-oneshot.c   | 2 +-
 3 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'kernel/time')

diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e1bd50cbbf5d..fdfa0c745bb6 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -14,7 +14,7 @@
 #include
 #include
 #include
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include
 #include
 #include
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 1bea399a9ef0..4f3886562b8c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -14,12 +14,14 @@
 #include
 #include
 #include
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include
 #include
 #include
 #include
 
+#include <asm/irq_regs.h>
+
 #include "tick-internal.h"
 
 /*
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 0258d3115d54..450c04935b66 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -14,7 +14,7 @@
 #include
 #include
 #include
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include
 #include
 #include
--
cgit v1.2.3-59-g8ed1b
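The hunks above are limited to kernel/time, so the interrupt.h side of the
move is not shown here. As a rough sketch only - the guard symbols, exact
prototypes and stub return values below are assumptions based on the
description above, not the real hunk - moving the affinity interfaces into
linux/interrupt.h with the GENERIC_HARDIRQ dependency noted by tglx could
look something like this:

	/*
	 * Sketch only, not the actual patch: affinity interfaces declared
	 * in <linux/interrupt.h>, with inline stubs so configurations
	 * without SMP / generic hardirqs still build.
	 */
	#include <linux/cpumask.h>
	#include <linux/errno.h>

	#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

	extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
	extern int irq_can_set_affinity(unsigned int irq);

	#else /* !CONFIG_SMP || !CONFIG_GENERIC_HARDIRQS */

	static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
	{
		return -EINVAL;
	}

	static inline int irq_can_set_affinity(unsigned int irq)
	{
		return 0;
	}

	#endif

With declarations along those lines, generic code such as the tick
broadcast code only needs linux/interrupt.h to call irq_set_affinity(),
and configurations without genirq still compile because they see the
inline stubs instead.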
From 903b8a8d4835a796f582033802c83283886f4a3d Mon Sep 17 00:00:00 2001
From: Karsten Wiese
Date: Thu, 28 Feb 2008 15:10:50 +0100
Subject: clockevents: optimise tick_nohz_stop_sched_tick() a bit

Call ts = &per_cpu(tick_cpu_sched, cpu); and cpu = smp_processor_id();
once instead of twice.

No functional change, as the changed code runs with local irqs off.
Reduces source lines and text size (20 bytes on x86_64).

[ akpm@linux-foundation.org: Build fix ]

Signed-off-by: Karsten Wiese
Cc: Andrew Morton
Signed-off-by: Thomas Gleixner
---
 kernel/time/tick-sched.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'kernel/time')

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 686da821d376..69dba0c71727 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -158,9 +158,8 @@ void tick_nohz_stop_idle(int cpu)
 	}
 }
 
-static ktime_t tick_nohz_start_idle(int cpu)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t now, delta;
 
 	now = ktime_get();
@@ -201,8 +200,8 @@ void tick_nohz_stop_sched_tick(void)
 	local_irq_save(flags);
 
 	cpu = smp_processor_id();
-	now = tick_nohz_start_idle(cpu);
 	ts = &per_cpu(tick_cpu_sched, cpu);
+	now = tick_nohz_start_idle(ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates
@@ -222,7 +221,6 @@ void tick_nohz_stop_sched_tick(void)
 	if (need_resched())
 		goto end;
 
-	cpu = smp_processor_id();
 	if (unlikely(local_softirq_pending())) {
 		static int ratelimit;
 
--
cgit v1.2.3-59-g8ed1b

From 6993fc5bbc5d63ccd55985b39c34417e430e75e9 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Wed, 30 Jan 2008 13:30:02 +0100
Subject: clocksource: make clocksource watchdog cycle through online CPUs

This way it also checks whether the clocks are synchronized between
CPUs. This might be able to detect slowly drifting TSCs which only go
wrong over a longer time.

Signed-off-by: Andi Kleen
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 kernel/time/clocksource.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

(limited to 'kernel/time')

diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7f60097d443a..912156dd6005 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -141,8 +141,16 @@ static void clocksource_watchdog(unsigned long data)
 	}
 
 	if (!list_empty(&watchdog_list)) {
-		__mod_timer(&watchdog_timer,
-			    watchdog_timer.expires + WATCHDOG_INTERVAL);
+		/*
+		 * Cycle through CPUs to check if the CPUs stay
+		 * synchronized to each other.
+		 */
+		int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map);
+
+		if (next_cpu >= NR_CPUS)
+			next_cpu = first_cpu(cpu_online_map);
+		watchdog_timer.expires += WATCHDOG_INTERVAL;
+		add_timer_on(&watchdog_timer, next_cpu);
 	}
 	spin_unlock(&watchdog_lock);
 }
@@ -164,7 +172,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 		if (!started && watchdog) {
 			watchdog_last = watchdog->read();
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
-			add_timer(&watchdog_timer);
+			add_timer_on(&watchdog_timer,
+				     first_cpu(cpu_online_map));
 		}
 	} else {
 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
@@ -185,7 +194,8 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 			watchdog_last = watchdog->read();
 			watchdog_timer.expires =
 				jiffies + WATCHDOG_INTERVAL;
-			add_timer(&watchdog_timer);
+			add_timer_on(&watchdog_timer,
+				     first_cpu(cpu_online_map));
 		}
 	}
 }
--
cgit v1.2.3-59-g8ed1b
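The next_cpu()/first_cpu() combination above is simply a round-robin walk
over the online CPU map: the watchdog timer is re-armed on the CPU after
the one it just ran on, wrapping back to the first online CPU when it runs
off the end of the map. The standalone user-space program below is only an
illustration of that selection order (the fixed-size array standing in for
cpu_online_map and the helper name are made up; none of it is kernel API):

	/* Illustration only: round-robin selection over "online" CPUs,
	 * mirroring the next_cpu()/first_cpu() wrap-around in the patch. */
	#include <stdio.h>

	#define NR_CPUS 8

	/* Stand-in for cpu_online_map: CPUs 2, 5 and 6 are offline. */
	static const int cpu_online[NR_CPUS] = { 1, 1, 0, 1, 1, 0, 0, 1 };

	static int next_online_cpu(int cpu)
	{
		int next;

		for (next = cpu + 1; next < NR_CPUS; next++)
			if (cpu_online[next])
				return next;
		/* Ran off the end of the map: wrap to the first online CPU. */
		for (next = 0; next < NR_CPUS; next++)
			if (cpu_online[next])
				return next;
		return -1;	/* no CPU online; cannot happen on a live system */
	}

	int main(void)
	{
		int cpu = 0, i;

		/* The watchdog timer would hop 0 -> 1 -> 3 -> 4 -> 7 -> 0 -> ... */
		for (i = 0; i < 10; i++) {
			printf("watchdog runs on CPU %d\n", cpu);
			cpu = next_online_cpu(cpu);
		}
		return 0;
	}

add_timer_on() then queues the watchdog timer on the chosen CPU, so
successive watchdog reads happen on different CPUs, which is what lets the
check notice per-CPU TSC drift as described in the changelog.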
From 7c3078b637882303b1dcf6a16229d0e35f6b60a5 Mon Sep 17 00:00:00 2001
From: Jason Wessel
Date: Fri, 15 Feb 2008 14:55:54 -0600
Subject: kgdb: clocksource watchdog

In order not to trip the clocksource watchdog, kgdb must touch the
watchdog on the return to the normal system run state.

Signed-off-by: Jason Wessel
Signed-off-by: Ingo Molnar
---
 include/linux/clocksource.h |  1 +
 kernel/kgdb.c               |  4 ++++
 kernel/time/clocksource.c   | 12 ++++++++++++
 3 files changed, 17 insertions(+)

(limited to 'kernel/time')

diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 85778a4b1209..35094479ca55 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -216,6 +216,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 /* used to install a new clocksource */
 extern int clocksource_register(struct clocksource*);
 extern void clocksource_unregister(struct clocksource*);
+extern void clocksource_touch_watchdog(void);
 extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
 extern void clocksource_resume(void);
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 017ee782bc08..e3f603740425 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -28,6 +28,7 @@
  * kind, whether express or implied.
  */
 #include
+#include <linux/clocksource.h>
 #include
 #include
 #include
@@ -574,6 +575,7 @@ static void kgdb_wait(struct pt_regs *regs)
 
 	/* Signal the primary CPU that we are done: */
 	atomic_set(&cpu_in_kgdb[cpu], 0);
+	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 }
 #endif
@@ -1396,6 +1398,7 @@ acquirelock:
 		    atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
 
 			atomic_set(&kgdb_active, -1);
+			clocksource_touch_watchdog();
 			local_irq_restore(flags);
 
 			goto acquirelock;
@@ -1487,6 +1490,7 @@ acquirelock:
 kgdb_restore:
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
+	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 
 	return error;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7f60097d443a..f61402b1f2d0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -221,6 +221,18 @@ void clocksource_resume(void)
 	spin_unlock_irqrestore(&clocksource_lock, flags);
 }
 
+/**
+ * clocksource_touch_watchdog - Update watchdog
+ *
+ * Update the watchdog after exception contexts such as kgdb so as not
+ * to incorrectly trip the watchdog.
+ *
+ */
+void clocksource_touch_watchdog(void)
+{
+	clocksource_resume_watchdog();
+}
+
 /**
  * clocksource_get_next - Returns the selected clocksource
  *
--
cgit v1.2.3-59-g8ed1b
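The reason the touch is needed: while all CPUs sit in the debugger with
interrupts off, the watchdog timer cannot run, so the first comparison
after resuming would see a huge interval since watchdog_last and could mark
a perfectly good clocksource unstable. The toy user-space program below is
a deliberately simplified model of that failure mode (the real watchdog
compares a watched clocksource against the watchdog clocksource, and the
internals of clocksource_resume_watchdog() are not shown in this patch); it
only illustrates why refreshing the baseline before resuming prevents a
false trip:

	/* Simplified model, not kernel code: a delta-based watchdog and the
	 * effect of "touching" it after the system was stopped for a while. */
	#include <stdio.h>
	#include <stdint.h>

	static uint64_t watchdog_last;	/* last counter value the watchdog saw */

	static void watchdog_check(uint64_t now, uint64_t max_delta)
	{
		uint64_t delta = now - watchdog_last;

		watchdog_last = now;
		if (delta > max_delta)
			printf("delta %llu: clocksource would be marked unstable\n",
			       (unsigned long long)delta);
		else
			printf("delta %llu: clocksource looks fine\n",
			       (unsigned long long)delta);
	}

	/* Rough analogue of clocksource_touch_watchdog(): refresh the baseline. */
	static void touch_watchdog(uint64_t now)
	{
		watchdog_last = now;
	}

	int main(void)
	{
		uint64_t now = 0;

		watchdog_last = now;
		watchdog_check(now += 500, 1000);	/* normal interval: fine */

		now += 100000;				/* long stop in the debugger */
		watchdog_check(now += 500, 1000);	/* without a touch: false trip */

		now += 100000;				/* another long stop ... */
		touch_watchdog(now);			/* ... but touched on resume */
		watchdog_check(now += 500, 1000);	/* fine again */
		return 0;
	}

In the real code the touch simply calls clocksource_resume_watchdog(),
which presumably refreshes the stored baselines in a similar way before the
next watchdog run.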