path: root/kernel/sched
author     Vincent Donnefort <vincent.donnefort@arm.com>	2020-05-27 17:39:14 +0100
committer  Peter Zijlstra <peterz@infradead.org>	2020-06-15 14:10:02 +0200
commit     4581bea8b4ec4de353369775dfef921191e393b3 (patch)
tree       ab0e45a3b8cb07f6d3a13a6e460ec34b9c006624 /kernel/sched
parent     sched/fair: Remove unused 'sd' parameter from scale_rt_capacity() (diff)
sched/debug: Add new tracepoints to track util_est
The util_est signals are key elements for EAS task placement and
frequency selection. Having tracepoints to track these signals enables
load-tracking and schedutil testing and/or debugging by a toolkit.

Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/1590597554-370150-1-git-send-email-vincent.donnefort@arm.com
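Like the existing pelt_*_tp tracepoints exported a few lines above, these are
bare tracepoints (no associated trace events), so a toolkit consumes them by
attaching probes from a module. Below is a minimal sketch of such a probe
module; it is not part of this patch, the module name, probe bodies and error
handling are illustrative, and dereferencing struct cfs_rq members would
additionally require the kernel-internal header kernel/sched/sched.h.

/* util_est_probe.c - illustrative probe module, not part of the patch */
#include <linux/module.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/* Probe signature: the 'data' cookie first, then the tracepoint argument. */
static void probe_util_est_cfs(void *data, struct cfs_rq *cfs_rq)
{
	/*
	 * struct cfs_rq is private to kernel/sched; reading
	 * cfs_rq->avg.util_est here needs kernel/sched/sched.h.
	 */
}

static void probe_util_est_se(void *data, struct sched_entity *se)
{
	/*
	 * struct sched_entity lives in <linux/sched.h>, so
	 * se->avg.util_est.enqueued / .ewma can be read directly,
	 * e.g. to forward them to a trace event or ring buffer.
	 */
}

static int __init util_est_probe_init(void)
{
	int ret;

	ret = register_trace_sched_util_est_cfs_tp(probe_util_est_cfs, NULL);
	if (ret)
		return ret;

	ret = register_trace_sched_util_est_se_tp(probe_util_est_se, NULL);
	if (ret)
		unregister_trace_sched_util_est_cfs_tp(probe_util_est_cfs, NULL);

	return ret;
}

static void __exit util_est_probe_exit(void)
{
	unregister_trace_sched_util_est_se_tp(probe_util_est_se, NULL);
	unregister_trace_sched_util_est_cfs_tp(probe_util_est_cfs, NULL);
	/* Wait for in-flight probes to finish before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(util_est_probe_init);
module_exit(util_est_probe_exit);
MODULE_LICENSE("GPL");

Since both symbols are exported with EXPORT_TRACEPOINT_SYMBOL_GPL (see the
core.c hunk below), such a module must carry MODULE_LICENSE("GPL").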
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 2 ++
-rw-r--r--  kernel/sched/fair.c | 6 ++++++
2 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9c89b0eaf796..0208b71bef80 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -36,6 +36,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 69da576f7f48..a785a9b262dd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3922,6 +3922,8 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
enqueued = cfs_rq->avg.util_est.enqueued;
enqueued += _task_util_est(p);
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+ trace_sched_util_est_cfs_tp(cfs_rq);
}
/*
@@ -3952,6 +3954,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
+ trace_sched_util_est_cfs_tp(cfs_rq);
+
/*
* Skip update of task's estimated utilization when the task has not
* yet completed an activation, e.g. being migrated.
@@ -4017,6 +4021,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
done:
WRITE_ONCE(p->se.avg.util_est, ue);
+
+ trace_sched_util_est_se_tp(&p->se);
}
static inline int task_fits_capacity(struct task_struct *p, long capacity)