author	Bharata B Rao <bharata@amd.com>	2021-10-04 16:27:04 +0530
committer	Peter Zijlstra <peterz@infradead.org>	2021-10-14 13:09:58 +0200
commit	5b763a14a5164e4c442e99d186fb39dac489e49b (patch)
tree	a1f10ea9f81cd1300d92b3d72f36c735f4b0daf9 /kernel/sched
parent	sched/numa: Replace hard-coded number by a define in numa_task_group() (diff)
sched/numa: Remove the redundant member numa_group::faults_cpu
numa_group::faults_cpu is actually a pointer to the region in
numa_group::faults[] where the NUMA_CPU stats are located. Remove this
redundant member and index numa_group::faults[] with NUMA_CPU directly,
as is already done for the similar per-process NUMA fault stats.

There is no functional change due to this commit.

Signed-off-by: Bharata B Rao <bharata@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Link: https://lkml.kernel.org/r/20211004105706.3669-3-bharata@amd.com
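For readers less familiar with the layout: numa_group::faults[] is a flat array with the NUMA_MEM region first and the NUMA_CPU region second, each region holding a shared/private counter pair per node, and task_faults_idx() maps (stat, nid, priv) onto it. The sketch below is a minimal userspace illustration of that indexing; the constants and the index formula mirror kernel/sched/fair.c, while nr_node_ids is hard-coded here purely for demonstration.

#include <stdio.h>

/* Shared vs. private hinting faults (mirrors kernel/sched/fair.c). */
#define NR_NUMA_HINT_FAULT_TYPES	2
/* Memory-locality and CPU-locality stats together. */
#define NR_NUMA_HINT_FAULT_STATS	(NR_NUMA_HINT_FAULT_TYPES * 2)

/* Hard-coded for the sketch; in the kernel this is a runtime value. */
#define nr_node_ids			2

enum numa_faults_stats { NUMA_MEM = 0, NUMA_CPU };

/*
 * Same index computation as the kernel helper: the NUMA_MEM region
 * occupies the first NR_NUMA_HINT_FAULT_TYPES * nr_node_ids slots,
 * the NUMA_CPU region follows immediately after it.
 */
static int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

int main(void)
{
	unsigned long faults[NR_NUMA_HINT_FAULT_STATS * nr_node_ids] = { 0 };

	/* Record one private CPU fault on node 1, as the patched code does:
	 * faults[] is indexed with NUMA_CPU directly, no cached faults_cpu
	 * pointer needed. */
	faults[task_faults_idx(NUMA_CPU, 1, 1)]++;

	for (int nid = 0; nid < nr_node_ids; nid++)
		printf("nid %d: mem=%lu cpu=%lu\n", nid,
		       faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		       faults[task_faults_idx(NUMA_MEM, nid, 1)],
		       faults[task_faults_idx(NUMA_CPU, nid, 0)] +
		       faults[task_faults_idx(NUMA_CPU, nid, 1)]);
	return 0;
}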
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	12
1 file changed, 5 insertions, 7 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fc0a0ed63d15..cfbd5ef0b558 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1038,11 +1038,12 @@ struct numa_group {
 	unsigned long total_faults;
 	unsigned long max_faults_cpu;
 	/*
+	 * faults[] array is split into two regions: faults_mem and faults_cpu.
+	 *
 	 * Faults_cpu is used to decide whether memory should move
 	 * towards the CPU. As a consequence, these stats are weighted
 	 * more by CPU use than by memory faults.
 	 */
-	unsigned long *faults_cpu;
 	unsigned long faults[];
 };
 
@@ -1216,8 +1217,8 @@ static inline unsigned long group_faults(struct task_struct *p, int nid)
 
 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
 {
-	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
-		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
+	return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
+		group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
 }
 
 static inline unsigned long group_faults_priv(struct numa_group *ng)
@@ -2384,7 +2385,7 @@ static void task_numa_placement(struct task_struct *p)
 				 * is at the beginning of the numa_faults array.
 				 */
 				ng->faults[mem_idx] += diff;
-				ng->faults_cpu[mem_idx] += f_diff;
+				ng->faults[cpu_idx] += f_diff;
 				ng->total_faults += diff;
 				group_faults += ng->faults[mem_idx];
 			}
@@ -2450,9 +2451,6 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 		grp->max_faults_cpu = 0;
 		spin_lock_init(&grp->lock);
 		grp->gid = p->pid;
-		/* Second half of the array tracks nids where faults happen */
-		grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
-						nr_node_ids;
 
 		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
 			grp->faults[i] = p->numa_faults[i];
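The replacement is index-preserving: the removed faults_cpu pointer was faults + NR_NUMA_HINT_FAULT_TYPES * nr_node_ids, so faults_cpu[task_faults_idx(NUMA_MEM, nid, priv)] and faults[task_faults_idx(NUMA_CPU, nid, priv)] name the same slot. A small self-contained check of that arithmetic (the index formula follows task_faults_idx() in fair.c; the node count here is arbitrary and chosen only for the check):

#include <assert.h>

#define NR_NUMA_HINT_FAULT_TYPES	2
enum { NUMA_MEM = 0, NUMA_CPU };

int main(void)
{
	int nr_node_ids = 4;	/* arbitrary; any positive value works */

	for (int nid = 0; nid < nr_node_ids; nid++) {
		for (int priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
			/* Index formula from task_faults_idx(). */
			int mem_idx = NR_NUMA_HINT_FAULT_TYPES * (NUMA_MEM * nr_node_ids + nid) + priv;
			int cpu_idx = NR_NUMA_HINT_FAULT_TYPES * (NUMA_CPU * nr_node_ids + nid) + priv;
			/* Offset the removed faults_cpu pointer used to add. */
			int cpu_off = NR_NUMA_HINT_FAULT_TYPES * nr_node_ids;

			/* Old: faults_cpu[mem_idx] == faults[cpu_off + mem_idx]
			 * New: faults[cpu_idx]     -- the same slot.          */
			assert(cpu_off + mem_idx == cpu_idx);
		}
	}
	return 0;
}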