author:    Minchan Kim <minchan@kernel.org>  2021-05-04 18:37:19 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>  2021-05-05 11:27:24 -0700
commit:    bbb269206f3c914d4f23e023de4ec020abea6d1b (patch)
tree:      f3d6ed6e3faae4cffecba3e3e14eb2af019100e4
parent:    Revert "mm: migrate: skip shared exec THP for NUMA balancing" (diff)
mm: vmstat: add cma statistics
Since CMA is used more widely, it is worth having CMA allocation
statistics in vmstat. With them, we can see how aggressively the
system uses CMA allocation and how often it fails.

Link: https://lkml.kernel.org/r/20210302183346.3707237-1-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: John Dias <joaodias@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/vm_event_item.h |  4
-rw-r--r--  mm/cma.c                      | 12
-rw-r--r--  mm/vmstat.c                   |  4
3 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 18e75974d4e3..21d7c7f72f1c 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -71,6 +71,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_HUGETLB_PAGE
 		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
 #endif
+#ifdef CONFIG_CMA
+		CMA_ALLOC_SUCCESS,
+		CMA_ALLOC_FAIL,
+#endif
 		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
 		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
 		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
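
For context, these enum entries index the kernel's per-CPU vm_event
counters. A rough sketch of the counting side, paraphrased from
include/linux/vmstat.h (exact details vary by kernel version):

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void count_vm_event(enum vm_event_item item)
{
	/* lock-free per-CPU increment; the per-CPU values are
	 * summed when /proc/vmstat is read */
	this_cpu_inc(vm_event_states.event[item]);
}

So CMA_ALLOC_SUCCESS and CMA_ALLOC_FAIL cost one per-CPU increment
per cma_alloc() call.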
diff --git a/mm/cma.c b/mm/cma.c
index acd6991f77a0..b44a71eb3174 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -435,13 +435,13 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	int ret = -ENOMEM;
 
 	if (!cma || !cma->count || !cma->bitmap)
-		return NULL;
+		goto out;
 
 	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
 		 count, align);
 
 	if (!count)
-		return NULL;
+		goto out;
 
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
@@ -449,7 +449,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
 	if (bitmap_count > bitmap_maxno)
-		return NULL;
+		goto out;
 
 	for (;;) {
 		spin_lock_irq(&cma->lock);
@@ -506,6 +506,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	}
 
 	pr_debug("%s(): returned %p\n", __func__, page);
+out:
+	if (page)
+		count_vm_event(CMA_ALLOC_SUCCESS);
+	else
+		count_vm_event(CMA_ALLOC_FAIL);
+
 	return page;
 }
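
As a usage illustration, here is a hypothetical caller sketch
(my_dev_alloc_buffer/my_dev_free_buffer are made-up names; cma_alloc()
and cma_release() are the real mm/cma.c API). With this patch, every
cma_alloc() attempt bumps exactly one of the two counters, which is
why the early returns above were converted to goto out:

#include <linux/cma.h>

static struct page *my_dev_alloc_buffer(struct cma *cma, size_t nr_pages)
{
	/* align = 0: no extra alignment; no_warn = false: warn on failure */
	struct page *page = cma_alloc(cma, nr_pages, 0, false);

	if (!page)		/* counted as cma_alloc_fail */
		return NULL;

	return page;		/* counted as cma_alloc_success */
}

static void my_dev_free_buffer(struct cma *cma, struct page *page,
			       size_t nr_pages)
{
	cma_release(cma, page, nr_pages);
}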
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 74b2c374b86c..49a8456ec079 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1313,6 +1313,10 @@ const char * const vmstat_text[] = {
 	"htlb_buddy_alloc_success",
 	"htlb_buddy_alloc_fail",
 #endif
+#ifdef CONFIG_CMA
+	"cma_alloc_success",
+	"cma_alloc_fail",
+#endif
 	"unevictable_pgs_culled",
 	"unevictable_pgs_scanned",
 	"unevictable_pgs_rescued",