author     Wei Yang <richard.weiyang@gmail.com>  2020-06-03 15:58:36 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 20:09:44 -0700
commit     534fe5e3c44f93ddd9daf7bb59ec2950583d0522 (patch)
tree       737513c454b9fe00137495547003f5d5435cdf9f
parent     mm/page_alloc.c: rename free_pages_check_bad() to check_free_page_bad() (diff)
mm/page_alloc.c: rename free_pages_check() to check_free_page()
free_pages_check() is the counterpart of check_new_page().  Rename it to
use the same naming convention.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Link: http://lkml.kernel.org/r/20200411220357.9636-5-richard.weiyang@gmail.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
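For readers outside mm/, the sketch below illustrates the naming convention this patch completes: check_new_page() validates a page when the allocator hands it out, check_free_page() validates it when the page comes back.  It is a minimal userspace sketch, not the kernel implementation; the struct, the flag masks and the reporting are simplified stand-ins, and only the check_new_page()/check_free_page() pairing mirrors the real code.

/*
 * Minimal userspace sketch (NOT the kernel code).  struct page, the
 * flag masks and the error reporting below are hypothetical stand-ins;
 * only the check_new_page()/check_free_page() naming pair is the point.
 */
#include <stdio.h>

struct page {
	unsigned long flags;			/* stand-in for real page->flags */
};

#define PAGE_FLAGS_CHECK_AT_FREE	0x1UL	/* hypothetical mask */
#define PAGE_FLAGS_CHECK_AT_PREP	0x2UL	/* hypothetical mask */

/* Returns nonzero if the page is in a bad state while being freed. */
static inline int check_free_page(struct page *page)
{
	if (!(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
		return 0;
	fprintf(stderr, "bad page state on free\n");
	return 1;
}

/* Counterpart: returns nonzero if a freshly allocated page looks bad. */
static inline int check_new_page(struct page *page)
{
	if (!(page->flags & PAGE_FLAGS_CHECK_AT_PREP))
		return 0;
	fprintf(stderr, "bad page state on allocation\n");
	return 1;
}

int main(void)
{
	struct page good = { .flags = 0 };
	struct page bad  = { .flags = PAGE_FLAGS_CHECK_AT_FREE };

	printf("good page on free: %d\n", check_free_page(&good));	/* 0 */
	printf("bad page on free:  %d\n", check_free_page(&bad));	/* 1 */
	printf("good page on alloc: %d\n", check_new_page(&good));	/* 0 */
	return 0;
}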
-rw-r--r--  mm/page_alloc.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 773c1005a662..57c497d32738 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1089,7 +1089,7 @@ static void check_free_page_bad(struct page *page)
 	bad_page(page, bad_reason);
 }
 
-static inline int free_pages_check(struct page *page)
+static inline int check_free_page(struct page *page)
 {
 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
 		return 0;
@@ -1181,7 +1181,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		for (i = 1; i < (1 << order); i++) {
 			if (compound)
 				bad += free_tail_pages_check(page, page + i);
-			if (unlikely(free_pages_check(page + i))) {
+			if (unlikely(check_free_page(page + i))) {
 				bad++;
 				continue;
 			}
@@ -1193,7 +1193,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	if (memcg_kmem_enabled() && PageKmemcg(page))
 		__memcg_kmem_uncharge_page(page, order);
 	if (check_free)
-		bad += free_pages_check(page);
+		bad += check_free_page(page);
 	if (bad)
 		return false;
 
@@ -1240,7 +1240,7 @@ static bool free_pcp_prepare(struct page *page)
 static bool bulkfree_pcp_prepare(struct page *page)
 {
 	if (debug_pagealloc_enabled_static())
-		return free_pages_check(page);
+		return check_free_page(page);
 	else
 		return false;
 }
@@ -1261,7 +1261,7 @@ static bool free_pcp_prepare(struct page *page)
 
 static bool bulkfree_pcp_prepare(struct page *page)
 {
-	return free_pages_check(page);
+	return check_free_page(page);
 }
 #endif /* CONFIG_DEBUG_VM */