summaryrefslogtreecommitdiffstats
path: root/sys/kern/kern_sched.c
diff options
context:
space:
mode:
authormpi <mpi@openbsd.org>2020-01-30 08:51:27 +0000
committermpi <mpi@openbsd.org>2020-01-30 08:51:27 +0000
commit24e0bd456b2461b3f663055f18aa3ccc17259829 (patch)
treed77ead955a4e4821a5ad6b6baacb0ebf8e316a3e /sys/kern/kern_sched.c
parentEnable t_ptrace with an errno change compared to NetBSD. (diff)
downloadwireguard-openbsd-24e0bd456b2461b3f663055f18aa3ccc17259829.tar.xz
wireguard-openbsd-24e0bd456b2461b3f663055f18aa3ccc17259829.zip
Split `p_priority' into `p_runpri' and `p_slppri'.
Using different fields to remember in which runqueue or sleepqueue threads currently are will make it easier to split the SCHED_LOCK(). With this change, the (potentially boosted) sleeping priority no longer overwrites the thread priority. This lets us get rid of the logic required to synchronize `p_priority' with `p_usrpri'. Tested by many, ok visa@
Diffstat (limited to 'sys/kern/kern_sched.c')
-rw-r--r--sys/kern/kern_sched.c10
1 file changed, 5 insertions, 5 deletions
diff --git a/sys/kern/kern_sched.c b/sys/kern/kern_sched.c
index e25993099ee..ea893c49a47 100644
--- a/sys/kern/kern_sched.c
+++ b/sys/kern/kern_sched.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sched.c,v 1.63 2020/01/21 16:16:23 mpi Exp $ */
+/* $OpenBSD: kern_sched.c,v 1.64 2020/01/30 08:51:27 mpi Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -258,7 +258,7 @@ setrunqueue(struct cpu_info *ci, struct proc *p, uint8_t prio)
p->p_cpu = ci;
p->p_stat = SRUN;
- p->p_priority = prio;
+ p->p_runpri = prio;
spc = &p->p_cpu->ci_schedstate;
spc->spc_nrun++;
@@ -279,7 +279,7 @@ void
remrunqueue(struct proc *p)
{
struct schedstate_percpu *spc;
- int queue = p->p_priority >> 2;
+ int queue = p->p_runpri >> 2;
SCHED_ASSERT_LOCKED();
spc = &p->p_cpu->ci_schedstate;
@@ -309,7 +309,7 @@ sched_chooseproc(void)
for (queue = 0; queue < SCHED_NQS; queue++) {
while ((p = TAILQ_FIRST(&spc->spc_qs[queue]))) {
remrunqueue(p);
- setrunqueue(NULL, p, p->p_priority);
+ setrunqueue(NULL, p, p->p_runpri);
if (p->p_cpu == curcpu()) {
KASSERT(p->p_flag & P_CPUPEG);
goto again;
@@ -581,7 +581,7 @@ sched_proc_to_cpu_cost(struct cpu_info *ci, struct proc *p)
* and the higher the priority of the proc.
*/
if (!cpuset_isset(&sched_idle_cpus, ci)) {
- cost += (p->p_priority - spc->spc_curpriority) *
+ cost += (p->p_usrpri - spc->spc_curpriority) *
sched_cost_priority;
cost += sched_cost_runnable;
}