author	Johannes Weiner <hannes@cmpxchg.org>	2021-04-29 22:56:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-04-30 11:20:38 -0700
commit	4bbcc5a41c5449f6a67edb3fbc2dccae9c6724db (patch)
tree	5d83c7d6abc40ff6a9bf1c70b1157b634a67afcd /tools/testing/selftests/cgroup
parent	mm: memcontrol: consolidate lruvec stat flushing (diff)
kselftests: cgroup: update kmem test for new vmstat implementation
With memcg having switched to rstat, memory.stat output is precise. Update the cgroup selftest to reflect the expectations and error tolerances of the new implementation.

Also add newly tracked types of memory to the memory.stat side of the equation, since they're included in memory.current and could throw false positives.

Link: https://lkml.kernel.org/r/20210209163304.77088-9-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Michal Koutný <mkoutny@suse.com>
Acked-by: Roman Gushchin <guro@fb.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'tools/testing/selftests/cgroup')
-rw-r--r--	tools/testing/selftests/cgroup/test_kmem.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
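For context, the check this patch tightens boils down to: read memory.current, sum the tracked components reported in memory.stat, and require the difference to stay within one 32-page percpu charge batch per CPU. The standalone sketch below (not part of the patch) mirrors that logic for an arbitrary cgroup using plain stdio rather than the selftest's cgroup_util.h helpers; it assumes cgroup v2 is mounted at /sys/fs/cgroup, and the "test" cgroup path is only a placeholder.

/*
 * Minimal sketch of the memory.current vs. memory.stat consistency check.
 * Build: gcc -o kmem_check kmem_check.c
 * Assumes cgroup v2; the cgroup path below is a placeholder.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/sysinfo.h>

static long read_stat_key(const char *path, const char *key)
{
	char line[256];
	long val = -1;
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, key, strlen(key))) {
			val = atol(line + strlen(key));
			break;
		}
	}
	fclose(f);
	return val;
}

int main(void)
{
	/* Placeholder cgroup; point these at an existing cgroup v2 directory. */
	const char *stat_path = "/sys/fs/cgroup/test/memory.stat";
	const char *curr_path = "/sys/fs/cgroup/test/memory.current";
	/* Trailing spaces keep e.g. "slab " from matching "slab_reclaimable". */
	const char *keys[] = { "slab ", "anon ", "file ", "kernel_stack ",
			       "pagetables ", "percpu ", "sock " };
	long sum = 0, current, max_error;
	FILE *f;

	f = fopen(curr_path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%ld", &current) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	for (unsigned long i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		long v = read_stat_key(stat_path, keys[i]);

		if (v < 0)
			return 1;
		sum += v;
	}

	/* One 32-page charge batch per CPU, with 4096-byte pages assumed. */
	max_error = 4096L * 32 * get_nprocs();
	printf("sum=%ld current=%ld delta=%ld tolerance=%ld\n",
	       sum, current, labs(sum - current), max_error);
	return labs(sum - current) < max_error ? 0 : 1;
}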
diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
index 0941aa16157e..22b31ebb3513 100644
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -19,12 +19,12 @@
/*
- * Memory cgroup charging and vmstat data aggregation is performed using
- * percpu batches 32 pages big (look at MEMCG_CHARGE_BATCH). So the maximum
- * discrepancy between charge and vmstat entries is number of cpus multiplied
- * by 32 pages multiplied by 2.
+ * Memory cgroup charging is performed using percpu batches 32 pages
+ * big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
+ * the maximum discrepancy between charge and vmstat entries is number
+ * of cpus multiplied by 32 pages.
*/
-#define MAX_VMSTAT_ERROR (4096 * 32 * 2 * get_nprocs())
+#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
static int alloc_dcache(const char *cgroup, void *arg)
@@ -162,7 +162,7 @@ static int cg_run_in_subcgroups(const char *parent,
*/
static int test_kmem_memcg_deletion(const char *root)
{
- long current, slab, anon, file, kernel_stack, sum;
+ long current, slab, anon, file, kernel_stack, pagetables, percpu, sock, sum;
int ret = KSFT_FAIL;
char *parent;
@@ -184,11 +184,14 @@ static int test_kmem_memcg_deletion(const char *root)
anon = cg_read_key_long(parent, "memory.stat", "anon ");
file = cg_read_key_long(parent, "memory.stat", "file ");
kernel_stack = cg_read_key_long(parent, "memory.stat", "kernel_stack ");
+ pagetables = cg_read_key_long(parent, "memory.stat", "pagetables ");
+ percpu = cg_read_key_long(parent, "memory.stat", "percpu ");
+ sock = cg_read_key_long(parent, "memory.stat", "sock ");
if (current < 0 || slab < 0 || anon < 0 || file < 0 ||
- kernel_stack < 0)
+ kernel_stack < 0 || pagetables < 0 || percpu < 0 || sock < 0)
goto cleanup;
- sum = slab + anon + file + kernel_stack;
+ sum = slab + anon + file + kernel_stack + pagetables + percpu + sock;
if (abs(sum - current) < MAX_VMSTAT_ERROR) {
ret = KSFT_PASS;
} else {
@@ -198,6 +201,9 @@ static int test_kmem_memcg_deletion(const char *root)
printf("anon = %ld\n", anon);
printf("file = %ld\n", file);
printf("kernel_stack = %ld\n", kernel_stack);
+ printf("pagetables = %ld\n", pagetables);
+ printf("percpu = %ld\n", percpu);
+ printf("sock = %ld\n", sock);
}
cleanup:
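As a worked example of the tightened tolerance (assuming an 8-CPU machine and 4096-byte pages, neither of which is stated in the patch): the old bound allowed 8 * 32 * 4096 * 2 = 2097152 bytes (2 MiB) of drift between memory.current and the memory.stat sum, because both the charge path and the old vmstat aggregation batched per CPU. With rstat making memory.stat exact, only the 32-page charge batch remains, so the new bound is 8 * 32 * 4096 = 1048576 bytes (1 MiB).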