path: root/kernel/sched/stats.h
author     Ingo Molnar <mingo@kernel.org>  2018-03-03 14:01:12 +0100
committer  Ingo Molnar <mingo@kernel.org>  2018-03-03 15:50:21 +0100
commit     97fb7a0a8944bd6d2c5634e1e0fa689a5c40bc22 (patch)
tree       4993de40ba9dc0cf76d2233b8292a771d8c41941 /kernel/sched/stats.h
parent     sched/deadline: Clean up various coding style details (diff)
sched: Clean up and harmonize the coding style of the scheduler code base
A good number of small style inconsistencies have accumulated in the
scheduler core, so do a pass over them to harmonize all these details:

 - fix spelling in comments,
 - use curly braces for multi-line statements,
 - remove unnecessary parentheses from integer literals,
 - capitalize consistently,
 - remove stray newlines,
 - add comments where necessary,
 - remove invalid/unnecessary comments,
 - align structure definitions and other data types vertically,
 - add missing newlines for increased readability,
 - fix vertical tabulation where it's misaligned,
 - harmonize preprocessor conditional block labeling and vertical alignment,
 - remove line-breaks where they uglify the code,
 - add newline after local variable definitions,

No change in functionality:

md5:
   1191fa0a890cfa8132156d2959d7e9e2  built-in.o.before.asm
   1191fa0a890cfa8132156d2959d7e9e2  built-in.o.after.asm

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
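To make the conventions listed above concrete, here is a small, self-contained C sketch; it is illustrative only and not part of the patch. It shows labeled #else/#endif blocks, vertically aligned macro bodies, and stub inlines collapsed onto one line, the same patterns the diff below applies to the schedstat helpers. CONFIG_EXAMPLE_STATS and the example_* names are invented for this example.

/*
 * Illustrative sketch only; not part of the patch. CONFIG_EXAMPLE_STATS
 * and the example_* names are invented for this example.
 */
#include <stdio.h>

#define CONFIG_EXAMPLE_STATS 1

#ifdef CONFIG_EXAMPLE_STATS
#define example_inc(var)		do { (var)++; } while (0)
#define example_add(var, amt)		do { (var) += (amt); } while (0)
static inline void example_note(unsigned long long delta) { printf("delta=%llu\n", delta); }
#else /* !CONFIG_EXAMPLE_STATS: */
# define example_inc(var)		do { } while (0)
# define example_add(var, amt)		do { } while (0)
static inline void example_note(unsigned long long delta) { }
#endif /* CONFIG_EXAMPLE_STATS */

int main(void)
{
	unsigned long long wait_count = 0;

	example_inc(wait_count);	/* counts only when the config option is on */
	example_add(wait_count, 2);
	example_note(wait_count);	/* prints delta=3 with the option enabled */

	return 0;
}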
Diffstat (limited to 'kernel/sched/stats.h')
-rw-r--r--  kernel/sched/stats.h | 86
1 file changed, 40 insertions(+), 46 deletions(-)
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 8e7b58de61e7..8aea199a39b4 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -30,35 +30,29 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
if (rq)
rq->rq_sched_info.run_delay += delta;
}
-#define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
+#define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var) do { var++; } while (0)
-#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
+#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt) do { var += (amt); } while (0)
-#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
-#define __schedstat_set(var, val) do { var = (val); } while (0)
-#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
-#define schedstat_val(var) (var)
-#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
-
-#else /* !CONFIG_SCHEDSTATS */
-static inline void
-rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
-{}
-static inline void
-rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
-{}
-static inline void
-rq_sched_info_depart(struct rq *rq, unsigned long long delta)
-{}
-#define schedstat_enabled() 0
-#define __schedstat_inc(var) do { } while (0)
-#define schedstat_inc(var) do { } while (0)
-#define __schedstat_add(var, amt) do { } while (0)
-#define schedstat_add(var, amt) do { } while (0)
-#define __schedstat_set(var, val) do { } while (0)
-#define schedstat_set(var, val) do { } while (0)
-#define schedstat_val(var) 0
-#define schedstat_val_or_zero(var) 0
+#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
+#define __schedstat_set(var, val) do { var = (val); } while (0)
+#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
+#define schedstat_val(var) (var)
+#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
+
+#else /* !CONFIG_SCHEDSTATS: */
+static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
+static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
+static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
+# define schedstat_enabled() 0
+# define __schedstat_inc(var) do { } while (0)
+# define schedstat_inc(var) do { } while (0)
+# define __schedstat_add(var, amt) do { } while (0)
+# define schedstat_add(var, amt) do { } while (0)
+# define __schedstat_set(var, val) do { } while (0)
+# define schedstat_set(var, val) do { } while (0)
+# define schedstat_val(var) 0
+# define schedstat_val_or_zero(var) 0
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_SCHED_INFO
@@ -69,9 +63,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
/*
* We are interested in knowing how long it was from the *first* time a
- * task was queued to the time that it finally hit a cpu, we call this routine
- * from dequeue_task() to account for possible rq->clock skew across cpus. The
- * delta taken on each cpu would annul the skew.
+ * task was queued to the time that it finally hit a CPU, we call this routine
+ * from dequeue_task() to account for possible rq->clock skew across CPUs. The
+ * delta taken on each CPU would annul the skew.
*/
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
@@ -87,7 +81,7 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
}
/*
- * Called when a task finally hits the cpu. We can now calculate how
+ * Called when a task finally hits the CPU. We can now calculate how
* long it was waiting to run. We also note when it began so that we
* can keep stats on how long its timeslice is.
*/
@@ -112,9 +106,10 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t)
*/
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
- if (unlikely(sched_info_on()))
+ if (unlikely(sched_info_on())) {
if (!t->sched_info.last_queued)
t->sched_info.last_queued = rq_clock(rq);
+ }
}
/*
@@ -127,8 +122,7 @@ static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
*/
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
- unsigned long long delta = rq_clock(rq) -
- t->sched_info.last_arrival;
+ unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;
rq_sched_info_depart(rq, delta);
@@ -142,11 +136,10 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
* the idle task.) We are only called when prev != next.
*/
static inline void
-__sched_info_switch(struct rq *rq,
- struct task_struct *prev, struct task_struct *next)
+__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
/*
- * prev now departs the cpu. It's not interesting to record
+ * prev now departs the CPU. It's not interesting to record
* stats about how efficient we were at scheduling the idle
* process, however.
*/
@@ -156,18 +149,19 @@ __sched_info_switch(struct rq *rq,
if (next != rq->idle)
sched_info_arrive(rq, next);
}
+
static inline void
-sched_info_switch(struct rq *rq,
- struct task_struct *prev, struct task_struct *next)
+sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
if (unlikely(sched_info_on()))
__sched_info_switch(rq, prev, next);
}
-#else
-#define sched_info_queued(rq, t) do { } while (0)
-#define sched_info_reset_dequeued(t) do { } while (0)
-#define sched_info_dequeued(rq, t) do { } while (0)
-#define sched_info_depart(rq, t) do { } while (0)
-#define sched_info_arrive(rq, next) do { } while (0)
-#define sched_info_switch(rq, t, next) do { } while (0)
+
+#else /* !CONFIG_SCHED_INFO: */
+# define sched_info_queued(rq, t) do { } while (0)
+# define sched_info_reset_dequeued(t) do { } while (0)
+# define sched_info_dequeued(rq, t) do { } while (0)
+# define sched_info_depart(rq, t) do { } while (0)
+# define sched_info_arrive(rq, next) do { } while (0)
+# define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */
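The comments in the sched_info_* helpers above describe the wait-time accounting: run_delay is charged from the *first* time a task is queued until it finally gets a CPU, with both timestamps taken from the same runqueue clock so that per-CPU clock skew cancels out. The following is a minimal userspace sketch of that pattern; it is not kernel code, and every name in it (the toy_* identifiers and fields) is invented for illustration.

/*
 * Illustrative sketch (not kernel code) of the accounting pattern the
 * sched_info_* helpers implement: record the first time a task is
 * queued, and on arrival charge the whole queued-to-running interval
 * to run_delay, using the same clock for both timestamps so any
 * per-CPU clock skew cancels out. All names here are invented.
 */
#include <stdio.h>

struct toy_sched_info {
	unsigned long long last_queued;   /* first time the task was queued */
	unsigned long long last_arrival;  /* when it last started running   */
	unsigned long long run_delay;     /* total time spent waiting       */
};

/* Stand-in for rq_clock(rq); the real clock is per runqueue. */
static unsigned long long toy_clock;

static void toy_info_queued(struct toy_sched_info *si)
{
	if (!si->last_queued)             /* only the *first* queueing counts */
		si->last_queued = toy_clock;
}

static void toy_info_arrive(struct toy_sched_info *si)
{
	unsigned long long now = toy_clock;

	if (si->last_queued) {
		si->run_delay += now - si->last_queued;
		si->last_queued = 0;
	}
	si->last_arrival = now;
}

int main(void)
{
	struct toy_sched_info si = { 0 };

	toy_clock = 100; toy_info_queued(&si);    /* queued at t=100          */
	toy_clock = 130; toy_info_queued(&si);    /* requeued: ignored        */
	toy_clock = 160; toy_info_arrive(&si);    /* gets the CPU at t=160    */

	printf("run_delay=%llu\n", si.run_delay); /* prints run_delay=60      */

	return 0;
}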