summary | refs | log | tree | commit | diff | stats
path: root/sys/kern/sched_bsd.c
diff options
context:
space:
mode:
author    art <art@openbsd.org> 2007-10-10 15:53:51 +0000
committer art <art@openbsd.org> 2007-10-10 15:53:51 +0000
commit    45053f4a9c0ff11fe529cfbf9c98901ff89d8e2c (patch)
tree      8892d5c7c720788fc3d9d2cba5ce3ebc1d11f09c /sys/kern/sched_bsd.c
parent    send_packet() now takes a struct in6_addr as destination instead of a (diff)
download  wireguard-openbsd-45053f4a9c0ff11fe529cfbf9c98901ff89d8e2c.tar.xz
wireguard-openbsd-45053f4a9c0ff11fe529cfbf9c98901ff89d8e2c.zip
Make context switching much more MI:
- Move the functionality of choosing a process from cpu_switch into a much
  simpler function: cpu_switchto. Instead of having the locore code walk the
  run queues, let the MI code choose the process we want to run and only
  implement the context switching itself in MD code.
- Let MD context switching run without worrying about spls or locks.
- Instead of having the idle loop implemented with special contexts in MD
  code, implement one idle proc for each cpu. Make the idle loop MI with MD
  hooks.
- Change the proc lists from the old style vax queues to TAILQs.
- Change the sleep queue from vax queues to TAILQs. This makes wakeup() go
  from O(n^2) to O(n).

There will be some MD fallout, but it will be fixed shortly. There's also a
few cleanups to be done after this.

deraadt@, kettenis@ ok
Diffstat (limited to 'sys/kern/sched_bsd.c')
-rw-r--r--  sys/kern/sched_bsd.c | 68
1 file changed, 29 insertions(+), 39 deletions(-)
diff --git a/sys/kern/sched_bsd.c b/sys/kern/sched_bsd.c
index 047207b1d39..aafcbb9eeb4 100644
--- a/sys/kern/sched_bsd.c
+++ b/sys/kern/sched_bsd.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sched_bsd.c,v 1.12 2007/05/18 16:10:15 art Exp $ */
+/* $OpenBSD: sched_bsd.c,v 1.13 2007/10/10 15:53:53 art Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/*-
@@ -57,9 +57,6 @@
int lbolt; /* once a second sleep address */
int rrticks_init; /* # of hardclock ticks per roundrobin() */
-int whichqs; /* Bit mask summary of non-empty Q's. */
-struct prochd qs[NQS];
-
struct SIMPLELOCK sched_lock;
void scheduler_start(void);
@@ -359,29 +356,26 @@ preempt(struct proc *newp)
SCHED_UNLOCK(s);
}
-
-/*
- * Must be called at splstatclock() or higher.
- */
void
mi_switch(void)
{
- struct proc *p = curproc; /* XXX */
+ struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
+ struct proc *p = curproc;
+ struct proc *nextproc;
struct rlimit *rlim;
struct timeval tv;
-#if defined(MULTIPROCESSOR)
+#ifdef MULTIPROCESSOR
int hold_count;
int sched_count;
#endif
- struct schedstate_percpu *spc = &p->p_cpu->ci_schedstate;
+
+ KASSERT(p->p_stat != SONPROC);
SCHED_ASSERT_LOCKED();
-#if defined(MULTIPROCESSOR)
+#ifdef MULTIPROCESSOR
/*
* Release the kernel_lock, as we are about to yield the CPU.
- * The scheduler lock is still held until cpu_switch()
- * selects a new process and removes it from the run queue.
*/
sched_count = __mp_release_all_but_one(&sched_lock);
if (p->p_flag & P_BIGLOCK)
@@ -391,7 +385,6 @@ mi_switch(void)
/*
* Compute the amount of time during which the current
* process was running, and add that to its total so far.
- * XXX - use microuptime here to avoid strangeness.
*/
microuptime(&tv);
if (timercmp(&tv, &spc->spc_runtime, <)) {
@@ -427,16 +420,27 @@ mi_switch(void)
*/
spc->spc_schedflags &= ~SPCF_SWITCHCLEAR;
- /*
- * Pick a new current process and record its start time.
- */
- uvmexp.swtch++;
- cpu_switch(p);
+ nextproc = sched_chooseproc();
+
+ if (p != nextproc) {
+ uvmexp.swtch++;
+ cpu_switchto(p, nextproc);
+ } else {
+ p->p_stat = SONPROC;
+ }
+
+ SCHED_ASSERT_LOCKED();
/*
- * Make sure that MD code released the scheduler lock before
- * resuming us.
+ * To preserve lock ordering, we need to release the sched lock
+ * and grab it after we grab the big lock.
+ * In the future, when the sched lock isn't recursive, we'll
+ * just release it here.
*/
+#ifdef MULTIPROCESSOR
+ __mp_unlock(&sched_lock);
+#endif
+
SCHED_ASSERT_UNLOCKED();
/*
@@ -444,11 +448,11 @@ mi_switch(void)
* be running on a new CPU now, so don't use the cache'd
* schedstate_percpu pointer.
*/
- KDASSERT(p->p_cpu != NULL);
- KDASSERT(p->p_cpu == curcpu());
+ KASSERT(p->p_cpu == curcpu());
+
microuptime(&p->p_cpu->ci_schedstate.spc_runtime);
-#if defined(MULTIPROCESSOR)
+#ifdef MULTIPROCESSOR
/*
* Reacquire the kernel_lock now. We do this after we've
* released the scheduler lock to avoid deadlock, and before
@@ -460,20 +464,6 @@ mi_switch(void)
#endif
}
-/*
- * Initialize the (doubly-linked) run queues
- * to be empty.
- */
-void
-rqinit(void)
-{
- int i;
-
- for (i = 0; i < NQS; i++)
- qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
- SIMPLE_LOCK_INIT(&sched_lock);
-}
-
static __inline void
resched_proc(struct proc *p, u_char pri)
{