| author | 2005-05-25 23:17:47 +0000 |
|---|---|
| committer | 2005-05-25 23:17:47 +0000 |
| commit | 51e884f5097bd82bb2785d036ae66e1531f468d2 (patch) |
| tree | 5ca09db806340d65527cf9a3f83000f9d4656ec2 /sys/kern/sched_bsd.c |
| parent | Mention isearch wrap (diff) |
| download | wireguard-openbsd-51e884f5097bd82bb2785d036ae66e1531f468d2.tar.xz wireguard-openbsd-51e884f5097bd82bb2785d036ae66e1531f468d2.zip |
This patch is mostly art's work and was done *a year* ago. Art wants to thank
everyone for the prompt review and ok of this work ;-) Yeah, that includes me
too, or maybe especially me. I am sorry.
Change the sched_lock to a mutex. This fixes, among other things, the infamous
"telnet localhost &" problem. The real bug in that case was that the sched_lock,
which is by design a non-recursive lock, was recursively acquired, and the
unbalanced releases left us holding the lock in the idle loop, blocking
scheduling on the other processors. Some of the other processors held the
biglock, though, which made it impossible for cpu 0 to enter the kernel... A nice deadlock.
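To make that failure mode concrete, here is a minimal userspace sketch (my
illustration, not kernel code; `toy_mutex`, `toy_mtx_enter` and `toy_mtx_leave`
are invented for this example). A plain test-and-set lock simply spins forever
when its own holder re-enters it; a lock that tracks its owner, as a mutex can,
is at least able to fail loudly instead:

```c
/*
 * Userspace toy, NOT kernel code: a non-recursive test-and-set lock
 * with owner tracking bolted on.  Compile with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_mutex {
	atomic_flag	locked;		/* the actual test-and-set lock */
	pthread_t	owner;		/* valid only while owned != 0 */
	int		owned;
};

static void
toy_mtx_enter(struct toy_mutex *m)
{
	/*
	 * The owner check is what a bare SIMPLELOCK lacks.  Without it,
	 * re-entering from the lock holder spins in the loop below
	 * forever -- exactly a CPU wedged in the idle loop while still
	 * holding sched_lock.  (Simplified: reading owner/owned without
	 * the lock is only safe in this single-threaded demo.)
	 */
	if (m->owned && pthread_equal(m->owner, pthread_self())) {
		fprintf(stderr, "toy_mtx_enter: locking against myself\n");
		abort();
	}
	while (atomic_flag_test_and_set(&m->locked))
		;			/* spin until the holder releases */
	m->owner = pthread_self();
	m->owned = 1;
}

static void
toy_mtx_leave(struct toy_mutex *m)
{
	m->owned = 0;
	atomic_flag_clear(&m->locked);
}

int
main(void)
{
	struct toy_mutex m = { .locked = ATOMIC_FLAG_INIT };

	toy_mtx_enter(&m);
	toy_mtx_enter(&m);	/* recursive acquisition: aborts loudly
				 * instead of silently spinning */
	toy_mtx_leave(&m);	/* not reached */
	return 0;
}
```

The kernel fix itself is of course the rebalanced lock/unlock pairs in the diff
below, not an abort; the point is that the old SIMPLELOCK could not even tell
you this was happening.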
Let me just say that debugging this for days, only to realize it was all fixed
in an old diff no one ever ok'd, was somewhat of an anticlimax.
This diff also changes splsched to be correct for all our architectures.
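The diff below only touches sched_bsd.c; the SCHED_LOCK()/SCHED_UNLOCK() macros
it leans on live in sys/sched.h and are not shown here. A plausible post-change
shape, assuming the macros pair the spl dance with the new mutex (mtx_enter(),
mtx_leave() and MUTEX_INITIALIZER() are the real OpenBSD mutex API; the macro
bodies below are a sketch, not the committed definitions):

```c
/* Sketch only -- assumed shape of the sys/sched.h macros, not the
 * committed definitions. */
#ifdef MULTIPROCESSOR
extern struct mutex sched_mutex;	/* declared in the diff below */

#define	SCHED_LOCK(s)							\
do {									\
	(s) = splsched();		/* mask scheduling interrupts */\
	mtx_enter(&sched_mutex);	/* spin for the MP lock */	\
} while (0)

#define	SCHED_UNLOCK(s)							\
do {									\
	mtx_leave(&sched_mutex);					\
	splx(s);			/* restore the saved ipl */	\
} while (0)
#else
/* Uniprocessor: masking scheduling interrupts is the whole lock. */
#define	SCHED_LOCK(s)	do { (s) = splsched(); } while (0)
#define	SCHED_UNLOCK(s)	splx(s)
#endif
```

The splsched() half is why that last sentence matters: the mutex alone does not
help on an architecture where splsched() fails to mask some interrupt source
that can call back into the scheduler.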
Diffstat (limited to 'sys/kern/sched_bsd.c')
| -rw-r--r-- | sys/kern/sched_bsd.c | 81 |
|---|---|---|

1 file changed, 39 insertions, 42 deletions
```diff
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 523e240a4b1..ff39ea3d1ad 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: sched_bsd.c,v 1.1 2004/07/29 06:25:45 tedu Exp $	*/
+/*	$OpenBSD: sched_bsd.c,v 1.2 2005/05/25 23:17:47 niklas Exp $	*/
 /*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/
 
 /*-
@@ -65,7 +65,9 @@
 int	rrticks_init;		/* # of hardclock ticks per roundrobin() */
 int	whichqs;		/* Bit mask summary of non-empty Q's. */
 struct	prochd qs[NQS];
-struct SIMPLELOCK sched_lock;
+#ifdef MULTIPROCESSOR
+struct mutex sched_mutex = MUTEX_INITIALIZER(IPL_SCHED);
+#endif
 
 void scheduler_start(void);
@@ -260,7 +262,7 @@ schedcpu(arg)
 	struct timeout *to = (struct timeout *)arg;
 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
 	struct proc *p;
-	int s;
+	int s, t;
 	unsigned int newcpu;
 	int phz;
 
@@ -274,6 +276,7 @@ schedcpu(arg)
 	KASSERT(phz);
 
 	for (p = LIST_FIRST(&allproc); p != 0; p = LIST_NEXT(p, p_list)) {
+		SCHED_LOCK(s);
 		/*
 		 * Increment time in/out of memory and sleep time
 		 * (if sleeping).  We ignore overflow; with 16-bit int's
@@ -287,9 +290,11 @@
 		 * If the process has slept the entire second,
 		 * stop recalculating its priority until it wakes up.
 		 */
-		if (p->p_slptime > 1)
+		if (p->p_slptime > 1) {
+			SCHED_UNLOCK(s);
 			continue;
-		s = splstatclock();	/* prevent state changes */
+		}
+		t = splstatclock();
 		/*
 		 * p_pctcpu is only for ps.
 		 */
@@ -305,8 +310,7 @@
 		p->p_cpticks = 0;
 		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu);
 		p->p_estcpu = newcpu;
-		splx(s);
-		SCHED_LOCK(s);
+		splx(t);
 		resetpriority(p);
 		if (p->p_priority >= PUSER) {
 			if ((p != curproc) &&
@@ -355,13 +359,13 @@ updatepri(p)
 void
 sched_unlock_idle(void)
 {
-	SIMPLE_UNLOCK(&sched_lock);
+	mtx_leave(&sched_mutex);
 }
 
 void
 sched_lock_idle(void)
 {
-	SIMPLE_LOCK(&sched_lock);
+	mtx_enter(&sched_mutex);
 }
 #endif /* MULTIPROCESSOR || LOCKDEBUG */
@@ -379,9 +383,7 @@ yield()
 	p->p_priority = p->p_usrpri;
 	setrunqueue(p);
 	p->p_stats->p_ru.ru_nvcsw++;
-	mi_switch();
-	SCHED_ASSERT_UNLOCKED();
-	splx(s);
+	mi_switch(s);
 }
 
 /*
@@ -408,19 +410,17 @@ preempt(newp)
 	p->p_stat = SRUN;
 	setrunqueue(p);
 	p->p_stats->p_ru.ru_nivcsw++;
-	mi_switch();
-	SCHED_ASSERT_UNLOCKED();
-	splx(s);
+	mi_switch(s);
 }
 
 /*
- * Must be called at splstatclock() or higher.
+ * Must be called at splsched() or higher.
  */
 void
-mi_switch()
+mi_switch(int s)
 {
-	struct proc *p = curproc;	/* XXX */
+	struct proc *p = curproc;
 	struct rlimit *rlim;
 	struct timeval tv;
 #if defined(MULTIPROCESSOR)
@@ -432,20 +432,6 @@
 
 	SCHED_ASSERT_LOCKED();
 
-#if defined(MULTIPROCESSOR)
-	/*
-	 * Release the kernel_lock, as we are about to yield the CPU.
-	 * The scheduler lock is still held until cpu_switch()
-	 * selects a new process and removes it from the run queue.
-	 */
-	if (p->p_flag & P_BIGLOCK)
-#ifdef notyet
-		hold_count = spinlock_release_all(&kernel_lock);
-#else
-		hold_count = __mp_release_all(&kernel_lock);
-#endif
-#endif
-
 	/*
 	 * Compute the amount of time during which the current
 	 * process was running, and add that to its total so far.
@@ -483,6 +469,7 @@
 	 */
 	rlim = &p->p_rlimit[RLIMIT_CPU];
 	if ((rlim_t)p->p_rtime.tv_sec >= rlim->rlim_cur) {
+		SCHED_UNLOCK(s);
 		if ((rlim_t)p->p_rtime.tv_sec >= rlim->rlim_max) {
 			psignal(p, SIGKILL);
 		} else {
@@ -490,8 +477,23 @@
 			if (rlim->rlim_cur < rlim->rlim_max)
 				rlim->rlim_cur += 5;
 		}
+		SCHED_LOCK(s);
 	}
 
+#if defined(MULTIPROCESSOR)
+	/*
+	 * Release the kernel_lock, as we are about to yield the CPU.
+	 * The scheduler lock is still held until cpu_switch()
+	 * selects a new process and removes it from the run queue.
+	 */
+	if (p->p_flag & P_BIGLOCK)
+#ifdef notyet
+		hold_count = spinlock_release_all(&kernel_lock);
+#else
+		hold_count = __mp_release_all(&kernel_lock);
+#endif
+#endif
+
 	/*
 	 * Process is about to yield the CPU; clear the appropriate
 	 * scheduling flags.
@@ -534,12 +536,9 @@
 	 * we reacquire the interlock.
 	 */
 	if (p->p_flag & P_BIGLOCK)
-#ifdef notyet
-		spinlock_acquire_count(&kernel_lock, hold_count);
-#else
 		__mp_acquire_count(&kernel_lock, hold_count);
 #endif
-#endif
+	splx(s);
 }
 
 /*
@@ -553,7 +552,6 @@ rqinit()
 
 	for (i = 0; i < NQS; i++)
 		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
-	SIMPLE_LOCK_INIT(&sched_lock);
 }
 
 static __inline void
@@ -601,8 +599,7 @@ resched_proc(struct proc *p, u_char pri)
  * and awakening the swapper if it isn't in memory.
  */
 void
-setrunnable(p)
-	register struct proc *p;
+setrunnable(struct proc *p)
 {
 	SCHED_ASSERT_LOCKED();
 
@@ -634,7 +631,7 @@
 		updatepri(p);
 	p->p_slptime = 0;
 	if ((p->p_flag & P_INMEM) == 0)
-		wakeup((caddr_t)&proc0);
+		sched_wakeup((caddr_t)&proc0);
 	else
 		resched_proc(p, p->p_priority);
 }
@@ -679,10 +676,10 @@ schedclock(p)
 {
 	int s;
 
-	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
 	SCHED_LOCK(s);
+	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
 	resetpriority(p);
-	SCHED_UNLOCK(s);
 	if (p->p_priority >= PUSER)
 		p->p_priority = p->p_usrpri;
+	SCHED_UNLOCK(s);
 }
```
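The most structural change in the diff is the mi_switch() calling convention:
callers used to call mi_switch(), assert the scheduler lock had been dropped,
and splx(s) themselves; now they hand the saved spl to mi_switch(s), which
performs the final splx() itself. Condensed from the yield() hunk above (an
illustrative fragment; the opening lines of yield() are not shown in the diff,
so their exact form here is assumed):

```c
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);			/* save the spl in s, take the lock */
	p->p_priority = p->p_usrpri;
	setrunqueue(p);			/* put ourselves back on a run queue */
	p->p_stats->p_ru.ru_nvcsw++;	/* account a voluntary switch */
	mi_switch(s);			/* consumes both lock and spl: drops
					 * the scheduler lock across
					 * cpu_switch() and ends with splx(s) */
}
```

Centralizing the unlock and the splx() inside mi_switch() means every path out
of a context switch restores the interrupt priority level exactly once, which
is precisely the invariant whose violation produced the lock-held-in-idle hang
described in the commit message.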
