author	Kefeng Wang <wangkefeng.wang@huawei.com>	2022-09-07 14:08:43 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2022-10-03 14:03:04 -0700
commit	5f7fa13fa858c17580ed513bd5e0a4b36d68fdd6 (patch)
tree	8204ee6892cde25ee0b4831475a3a5c1c231f98e
parent	mm: reuse pageblock_start/end_pfn() macro (diff)
mm: add pageblock_align() macro
Add pageblock_align() macro and use it to simplify code.

Link: https://lkml.kernel.org/r/20220907060844.126891-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	include/linux/pageblock-flags.h	1
-rw-r--r--	mm/memblock.c	4
-rw-r--r--	mm/page_isolation.c	4
3 files changed, 5 insertions, 4 deletions
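
For context (not part of the patch), here is a minimal user-space sketch of how the new pageblock_align() relates to the existing pageblock_start_pfn()/pageblock_end_pfn() helpers. The pageblock_order value of 9 and the simplified ALIGN()/ALIGN_DOWN() copies are assumptions chosen for illustration; they match the kernel's behaviour for power-of-two alignments.

/* Illustrative sketch only; pageblock_order == 9 and the simplified
 * ALIGN()/ALIGN_DOWN() below are assumptions for this example, not
 * part of the patch.
 */
#include <stdio.h>

#define pageblock_order		9
#define pageblock_nr_pages	(1UL << pageblock_order)	/* 512 pages */

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))	/* round up   */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))		/* round down */

#define pageblock_align(pfn)		ALIGN((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)

int main(void)
{
	unsigned long pfn = 1000;

	/* A pfn of 1000 sits inside the pageblock [512, 1024). */
	printf("pageblock_align(%lu)     = %lu\n", pfn, pageblock_align(pfn));     /* 1024 */
	printf("pageblock_start_pfn(%lu) = %lu\n", pfn, pageblock_start_pfn(pfn)); /* 512  */
	printf("pageblock_end_pfn(%lu)   = %lu\n", pfn, pageblock_end_pfn(pfn));   /* 1024 */
	return 0;
}

The patch itself does not change behaviour: every ALIGN(..., pageblock_nr_pages) call it touches already rounded a pfn up to a pageblock boundary, so pageblock_align() is a readability helper mirroring the pageblock_start_pfn()/pageblock_end_pfn() macros introduced by the parent commit.
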
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index a09b7fe6bbf8..293c76630fa8 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -53,6 +53,7 @@ extern unsigned int pageblock_order;
#endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
diff --git a/mm/memblock.c b/mm/memblock.c
index 46fe7575f03c..511d4783dcf1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2014,12 +2014,12 @@ static void __init free_unused_memmap(void)
* presume that there are no holes in the memory map inside
* a pageblock
*/
- prev_end = ALIGN(end, pageblock_nr_pages);
+ prev_end = pageblock_align(end);
}
#ifdef CONFIG_SPARSEMEM
if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
- prev_end = ALIGN(end, pageblock_nr_pages);
+ prev_end = pageblock_align(end);
free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
}
#endif
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 5819cb9c62f3..fa82faa07daf 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -533,7 +533,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
struct page *page;
/* isolation is done at page block granularity */
unsigned long isolate_start = pageblock_start_pfn(start_pfn);
- unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
+ unsigned long isolate_end = pageblock_align(end_pfn);
int ret;
bool skip_isolation = false;
@@ -580,7 +580,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn;
struct page *page;
unsigned long isolate_start = pageblock_start_pfn(start_pfn);
- unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
+ unsigned long isolate_end = pageblock_align(end_pfn);
for (pfn = isolate_start;
pfn < isolate_end;