Diffstat (limited to 'kernel/sched/loadavg.c')
-rw-r--r--  kernel/sched/loadavg.c  34
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index 89a989e4d758..a171c1258109 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -6,10 +6,6 @@
* figure. It's a silly number but people think it's important. We go through
* great pains to make it work on big machines and tickless kernels.
*/
-
-#include <linux/export.h>
-#include <linux/sched/loadavg.h>
-
#include "sched.h"
/*
@@ -32,29 +28,29 @@
* Due to a number of reasons the above turns into the mess below:
*
* - for_each_possible_cpu() is prohibitively expensive on machines with
- * serious number of cpus, therefore we need to take a distributed approach
+ * serious number of CPUs, therefore we need to take a distributed approach
* to calculating nr_active.
*
* \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
* = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
*
* So assuming nr_active := 0 when we start out -- true by definition, we
- * can simply take per-cpu deltas and fold those into a global accumulate
+ * can simply take per-CPU deltas and fold those into a global accumulate
* to obtain the same result. See calc_load_fold_active().
*
- * Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ * Furthermore, in order to avoid synchronizing all per-CPU delta folding
* across the machine, we assume 10 ticks is sufficient time for every
- * cpu to have completed this task.
+ * CPU to have completed this task.
*
* This places an upper-bound on the IRQ-off latency of the machine. Then
* again, being late doesn't lose the delta, just wrecks the sample.
*
- * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
- * this would add another cross-cpu cacheline miss and atomic operation
- * to the wakeup path. Instead we increment on whatever cpu the task ran
- * when it went into uninterruptible state and decrement on whatever cpu
+ * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-CPU because
+ * this would add another cross-CPU cacheline miss and atomic operation
+ * to the wakeup path. Instead we increment on whatever CPU the task ran
+ * when it went into uninterruptible state and decrement on whatever CPU
* did the wakeup. This means that only the sum of nr_uninterruptible over
- * all cpus yields the correct result.
+ * all CPUs yields the correct result.
*
* This covers the NO_HZ=n code; for extra headaches, see the comment below.
*/
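
To make the per-CPU folding described in the hunk above concrete, here is a minimal standalone C sketch (C11 atomics, compilable outside the kernel). The names demo_rq, demo_fold_active() and demo_tick_fold() are invented for illustration; they loosely mirror what calc_load_fold_active() does with the corresponding fields on struct rq, without the kernel's locking or NO_HZ handling.

#include <stdatomic.h>

/* Global accumulator that all CPUs fold their deltas into. */
static atomic_long calc_load_tasks;

/* Per-CPU state: a stand-in for the fields on struct rq. */
struct demo_rq {
        long nr_running;            /* runnable tasks on this CPU          */
        long nr_uninterruptible;    /* only the sum over CPUs is accurate  */
        long calc_load_active;      /* value published at the last fold    */
};

/* This CPU's delta since its last fold (cf. calc_load_fold_active()). */
static long demo_fold_active(struct demo_rq *rq)
{
        long nr_active = rq->nr_running + rq->nr_uninterruptible;
        long delta = 0;

        if (nr_active != rq->calc_load_active) {
                delta = nr_active - rq->calc_load_active;
                rq->calc_load_active = nr_active;
        }
        return delta;
}

/* Called from each CPU's tick: publish only the delta, no cross-CPU reads. */
static void demo_tick_fold(struct demo_rq *rq)
{
        long delta = demo_fold_active(rq);

        if (delta)
                atomic_fetch_add(&calc_load_tasks, delta);
}
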
@@ -115,11 +111,11 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
* Handle NO_HZ for the global load-average.
*
* Since the above described distributed algorithm to compute the global
- * load-average relies on per-cpu sampling from the tick, it is affected by
+ * load-average relies on per-CPU sampling from the tick, it is affected by
* NO_HZ.
*
* The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon
- * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * entering NO_HZ state such that we can include this as an 'extra' CPU delta
* when we read the global state.
*
* Obviously reality has to ruin such a delightfully simple scheme:
@@ -146,9 +142,9 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
* busy state.
*
* This is solved by pushing the window forward, and thus skipping the
- * sample, for this cpu (effectively using the NO_HZ-delta for this cpu which
+ * sample, for this CPU (effectively using the NO_HZ-delta for this CPU which
* was in effect at the time the window opened). This also solves the issue
- * of having to deal with a cpu having been in NO_HZ for multiple LOAD_FREQ
+ * of having to deal with a CPU having been in NO_HZ for multiple LOAD_FREQ
* intervals.
*
* When making the ILB scale, we should try to pull this in as well.
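
A simplified standalone sketch of the "push the window forward" rule described above: given the current time and the start of the pending global sample window, decide which window this CPU should sample next when it leaves NO_HZ. The function name and the HZ value are assumptions for this sketch, and jiffies wraparound, which the kernel handles with time_before(), is ignored.

#define HZ         1000UL                /* assumed tick rate for this sketch */
#define LOAD_FREQ  (5 * HZ + 1)          /* one load sample every ~5 seconds  */

/* Which LOAD_FREQ window should this CPU sample next after leaving NO_HZ? */
static unsigned long demo_next_sample_window(unsigned long now,
                                             unsigned long pending_window)
{
        /* Woke before the pending window opened: sample it normally. */
        if (now < pending_window)
                return pending_window;

        /*
         * Woke inside (or after) the window: the NO_HZ delta folded when
         * this CPU went idle already stands in for its sample, so skip
         * this window and aim for the next one.
         */
        return pending_window + LOAD_FREQ;
}
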
@@ -299,7 +295,7 @@ calc_load_n(unsigned long load, unsigned long exp,
}
/*
- * NO_HZ can leave us missing all per-cpu ticks calling
+ * NO_HZ can leave us missing all per-CPU ticks calling
* calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
* calc_load_nohz per calc_load_nohz_start(), all we need to do is fold
* in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary.
@@ -363,7 +359,7 @@ void calc_global_load(unsigned long ticks)
return;
/*
- * Fold the 'old' NO_HZ-delta to include all NO_HZ cpus.
+ * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
*/
delta = calc_load_nohz_fold();
if (delta)
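
Finally, a sketch of the periodic global update that this last hunk belongs to, reusing calc_load_tasks from the first sketch and demo_calc_load_n(), FSHIFT and FIXED_1 from the previous one. Every LOAD_FREQ interval the pending NO_HZ delta is folded into the global task count and the three averages are decayed. EXP_1, EXP_5 and EXP_15 are the kernel's actual fixed-point decay factors; calc_load_nohz here and the function body are illustrative and skip the catch-up over missed windows that calc_global_load() also performs.

#define EXP_1   1884    /* 1/exp(5sec/1min)  in 11-bit fixed point */
#define EXP_5   2014    /* 1/exp(5sec/5min)                        */
#define EXP_15  2037    /* 1/exp(5sec/15min)                       */

static atomic_long calc_load_nohz;      /* deltas parked by CPUs entering NO_HZ */
static unsigned long avenrun[3];        /* the 1/5/15 minute averages           */

static void demo_calc_global_load(void)
{
        long delta = atomic_exchange(&calc_load_nohz, 0);
        long active;

        /* Fold the 'old' NO_HZ delta so idle CPUs are accounted for. */
        if (delta)
                atomic_fetch_add(&calc_load_tasks, delta);

        active = atomic_load(&calc_load_tasks);
        active = active > 0 ? active << FSHIFT : 0;

        avenrun[0] = demo_calc_load_n(avenrun[0], EXP_1,  active, 1);
        avenrun[1] = demo_calc_load_n(avenrun[1], EXP_5,  active, 1);
        avenrun[2] = demo_calc_load_n(avenrun[2], EXP_15, active, 1);
}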