From d30dd8be06a5ae640766b20ea9ae288832bd12ac Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Thu, 28 Jul 2016 15:48:14 -0700
Subject: mm: track NR_KERNEL_STACK in KiB instead of number of stacks

Currently, NR_KERNEL_STACK tracks the number of kernel stacks in a
zone.  This only makes sense if each kernel stack exists entirely in
one zone, and allowing vmapped stacks could break this assumption.

Since frv has THREAD_SIZE < PAGE_SIZE, we need to track kernel stack
allocations in a unit that divides both THREAD_SIZE and PAGE_SIZE on
all architectures.  Keep it simple and use KiB.

Link: http://lkml.kernel.org/r/083c71e642c5fa5f1b6898902e1b2db7b48940d4.1468523549.git.luto@kernel.org
Signed-off-by: Andy Lutomirski
Cc: Vladimir Davydov
Acked-by: Johannes Weiner
Cc: Michal Hocko
Reviewed-by: Josh Poimboeuf
Reviewed-by: Vladimir Davydov
Acked-by: Michal Hocko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/base/node.c    | 3 +--
 fs/proc/meminfo.c      | 2 +-
 include/linux/mmzone.h | 2 +-
 kernel/fork.c          | 3 ++-
 mm/page_alloc.c        | 3 +--
 5 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/drivers/base/node.c b/drivers/base/node.c
index 264cc214c4df..29cd96661b30 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -124,8 +124,7 @@ static ssize_t node_read_meminfo(struct device *dev,
 		       nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
 		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
 		       nid, K(i.sharedram),
-		       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK) *
-				THREAD_SIZE / 1024,
+		       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
 		       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
 		       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 		       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index c1fdcc1a907a..09e18fdf61e5 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -147,7 +147,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		global_page_state(NR_SLAB_UNRECLAIMABLE)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE)),
 		K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
-		global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
+		global_page_state(NR_KERNEL_STACK_KB),
 		K(global_page_state(NR_PAGETABLE)),
 #ifdef CONFIG_QUICKLIST
 		K(quicklist_total_size()),
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ca0fbc483441..f2e4e90621ec 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -121,7 +121,7 @@ enum zone_stat_item {
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,		/* used for pagetables */
-	NR_KERNEL_STACK,
+	NR_KERNEL_STACK_KB,	/* measured in KiB */
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)
diff --git a/kernel/fork.c b/kernel/fork.c
index de21f25e0d2c..af3637e0ee52 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -225,7 +225,8 @@ static void account_kernel_stack(unsigned long *stack, int account)
 {
 	struct zone *zone = page_zone(virt_to_page(stack));

-	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
+	mod_zone_page_state(zone, NR_KERNEL_STACK_KB,
+			    THREAD_SIZE / 1024 * account);
 }

 void free_task(struct task_struct *tsk)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dfdb608f7b3d..c281125b2349 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4359,8 +4359,7 @@ void show_free_areas(unsigned int filter)
 			K(zone_page_state(zone, NR_MLOCK)),
 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
-			zone_page_state(zone, NR_KERNEL_STACK) *
-				THREAD_SIZE / 1024,
+			zone_page_state(zone, NR_KERNEL_STACK_KB),
 			K(zone_page_state(zone, NR_PAGETABLE)),
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(free_pcp),
--
cgit v1.2.3-59-g8ed1b
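
For readers following the accounting change, below is a minimal user-space
sketch (not kernel code) of the rule the patch introduces: each task charges
THREAD_SIZE / 1024 KiB to the per-zone NR_KERNEL_STACK_KB counter when its
stack is allocated (account = +1) and uncharges the same amount when it is
freed (account = -1).  The PAGE_SIZE/THREAD_SIZE values and the names
kstack_kb and account_kernel_stack_kb are illustrative assumptions only.

/*
 * Sketch of KiB-based kernel-stack accounting.  Using KiB works because
 * 1024 divides both THREAD_SIZE and PAGE_SIZE on all architectures, so the
 * delta is exact whether a stack spans several pages or a fraction of one.
 */
#include <stdio.h>

#define PAGE_SIZE   4096UL          /* assumed: 4 KiB pages */
#define THREAD_SIZE (4 * PAGE_SIZE) /* assumed: 16 KiB stacks, as on x86_64 */

static long kstack_kb;  /* stands in for a zone's NR_KERNEL_STACK_KB */

static void account_kernel_stack_kb(int account)
{
	/* account is +1 on stack allocation, -1 on stack free */
	kstack_kb += (long)(THREAD_SIZE / 1024) * account;
}

int main(void)
{
	account_kernel_stack_kb(+1);  /* task created   -> +16 KiB */
	account_kernel_stack_kb(+1);  /* another task   -> +16 KiB */
	account_kernel_stack_kb(-1);  /* one task exits -> -16 KiB */

	/* Prints "KernelStack: 16 kB", already in /proc/meminfo's unit,
	 * so readers of the counter no longer multiply by THREAD_SIZE. */
	printf("KernelStack: %ld kB\n", kstack_kb);
	return 0;
}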