author	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-04-04 14:35:04 -0400
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-04-07 09:43:41 -0400
commit	ffe06786b54039edcecb51a54061ee8d81036a19 (patch)
tree	bfb6c6b00973c48903c1ac6ba50fc05899a4f469 /mm
parent	mm/huge_memory: Avoid calling pmd_page() on a non-leaf PMD (diff)
mm/migrate: Use a folio in alloc_migration_target()
This removes an assumption that a large folio is HPAGE_PMD_ORDER as well
as letting us remove the call to prep_transhuge_page() and a few hidden
calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
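As a minimal sketch of the pattern this patch adopts (the helper below is
hypothetical and not part of the commit; page_folio(), folio_test_large()
and folio_order() are the folio APIs used in the diff):

#include <linux/mm.h>

/*
 * Illustrative only, not from this commit: folio_order() reports the
 * actual order of any large folio, so callers need not assume
 * HPAGE_PMD_ORDER, and page_folio() resolves a tail page to its folio
 * up front, avoiding repeated hidden compound_head() calls.
 */
static unsigned int migration_alloc_order(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_large(folio))
		return folio_order(folio);

	return 0;	/* a plain order-0 page */
}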
Diffstat (limited to 'mm')
-rw-r--r--	mm/migrate.c	22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index de175e2fdba5..9894e90db006 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1520,10 +1520,11 @@ out:
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+	struct folio *folio = page_folio(page);
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
-	struct page *new_page = NULL;
+	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
 
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	gfp_mask = mtc->gfp_mask;
 	nid = mtc->nid;
 	if (nid == NUMA_NO_NODE)
-		nid = page_to_nid(page);
+		nid = folio_nid(folio);
 
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(compound_head(page));
+	if (folio_test_hugetlb(folio)) {
+		struct hstate *h = page_hstate(&folio->page);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
 	}
 
-	if (PageTransHuge(page)) {
+	if (folio_test_large(folio)) {
 		/*
 		 * clear __GFP_RECLAIM to make the migration callback
 		 * consistent with regular THP allocations.
 		 */
 		gfp_mask &= ~__GFP_RECLAIM;
 		gfp_mask |= GFP_TRANSHUGE;
-		order = HPAGE_PMD_ORDER;
+		order = folio_order(folio);
 	}
-	zidx = zone_idx(page_zone(page));
+	zidx = zone_idx(folio_zone(folio));
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
-	if (new_page && PageTransHuge(new_page))
-		prep_transhuge_page(new_page);
+	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 
-	return new_page;
+	return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
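For context, alloc_migration_target() is handed to migrate_pages() as its
allocation callback, with the migration_target_control passed through the
opaque private argument. A hedged sketch of that caller pattern, assuming
mm-internal headers as in mm/memory_hotplug.c (the function name, node
choice and gfp fields are illustrative, not code from this commit):

#include <linux/migrate.h>
#include <linux/gfp.h>
#include "internal.h"	/* struct migration_target_control is mm-internal */

/*
 * Illustrative caller, not part of this commit: migrate everything on
 * @pagelist to @target_nid, letting alloc_migration_target() pick the
 * right allocator (hugetlb, large folio, or base page) per entry.
 */
static int migrate_list_to_node(struct list_head *pagelist, int target_nid)
{
	struct migration_target_control mtc = {
		.nid = target_nid,	/* NUMA_NO_NODE: stay on the source node */
		.gfp_mask = GFP_USER | __GFP_MOVABLE,
	};

	return migrate_pages(pagelist, alloc_migration_target, NULL,
			     (unsigned long)&mtc, MIGRATE_SYNC,
			     MR_MEMORY_HOTPLUG, NULL);
}

After this patch, a large folio on such a list gets its replacement
allocated at its own order via __folio_alloc(), with the compound-page
setup that prep_transhuge_page() used to do handled inside the folio
allocator.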