Diffstat (limited to 'mm')
-rw-r--r--  mm/highmem.c          4
-rw-r--r--  mm/huge_memory.c     11
-rw-r--r--  mm/list_lru.c         6
-rw-r--r--  mm/mempolicy.c       39
-rw-r--r--  mm/migrate.c         80
-rw-r--r--  mm/mremap.c           3
-rw-r--r--  mm/page_vma_mapped.c  6
7 files changed, 64 insertions(+), 85 deletions(-)
diff --git a/mm/highmem.c b/mm/highmem.c
index 0cc0c4da7ed9..1a692997fac4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -624,7 +624,7 @@ void __kmap_local_sched_out(void)
/* With debug all even slots are unmapped and act as guard */
if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
- WARN_ON_ONCE(!pte_none(pteval));
+ WARN_ON_ONCE(pte_val(pteval) != 0);
continue;
}
if (WARN_ON_ONCE(pte_none(pteval)))
@@ -661,7 +661,7 @@ void __kmap_local_sched_in(void)
/* With debug all even slots are unmapped and act as guard */
if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
- WARN_ON_ONCE(!pte_none(pteval));
+ WARN_ON_ONCE(pte_val(pteval) != 0);
continue;
}
if (WARN_ON_ONCE(pte_none(pteval)))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2fe38212e07c..c468fee595ff 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2145,15 +2145,14 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
* pmd against. Otherwise we can end up replacing wrong folio.
*/
VM_BUG_ON(freeze && !folio);
- if (folio) {
- VM_WARN_ON_ONCE(!folio_test_locked(folio));
- if (folio != page_folio(pmd_page(*pmd)))
- goto out;
- }
+ VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
- is_pmd_migration_entry(*pmd))
+ is_pmd_migration_entry(*pmd)) {
+ if (folio && folio != page_folio(pmd_page(*pmd)))
+ goto out;
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+ }
out:
spin_unlock(ptl);
diff --git a/mm/list_lru.c b/mm/list_lru.c
index c669d87001a6..ba76428ceece 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -395,12 +395,6 @@ static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
struct list_lru_one *src, *dst;
/*
- * If there is no lru entry in this nlru, we can skip it immediately.
- */
- if (!READ_ONCE(nlru->nr_items))
- return;
-
- /*
* Since list_lru_{add,del} may be called under an IRQ-safe lock,
* we have to use IRQ-safe primitives here to avoid deadlock.
*/
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a2516d31db6c..8c74107a2b15 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
*/
static struct page *new_page(struct page *page, unsigned long start)
{
+ struct folio *dst, *src = page_folio(page);
struct vm_area_struct *vma;
unsigned long address;
+ gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
vma = find_vma(current->mm, start);
while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
vma = vma->vm_next;
}
- if (PageHuge(page)) {
- return alloc_huge_page_vma(page_hstate(compound_head(page)),
+ if (folio_test_hugetlb(src))
+ return alloc_huge_page_vma(page_hstate(&src->page),
vma, address);
- } else if (PageTransHuge(page)) {
- struct page *thp;
- thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
- HPAGE_PMD_ORDER);
- if (!thp)
- return NULL;
- prep_transhuge_page(thp);
- return thp;
- }
+ if (folio_test_large(src))
+ gfp = GFP_TRANSHUGE;
+
/*
- * if !vma, alloc_page_vma() will use task or system default policy
+ * if !vma, vma_alloc_folio() will use task or system default policy
*/
- return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
- vma, address);
+ dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+ folio_test_large(src));
+ return &dst->page;
}
#else
@@ -2227,6 +2224,19 @@ out:
}
EXPORT_SYMBOL(alloc_pages_vma);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr, bool hugepage)
+{
+ struct folio *folio;
+
+ folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
+ hugepage);
+ if (folio && order > 1)
+ prep_transhuge_page(&folio->page);
+
+ return folio;
+}
+
/**
* alloc_pages - Allocate pages.
* @gfp: GFP flags.
@@ -2733,6 +2743,7 @@ alloc_new:
mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
if (!mpol_new)
goto err_out;
+ atomic_set(&mpol_new->refcnt, 1);
goto restart;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index de175e2fdba5..6c31ee1e1c9b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1520,10 +1520,11 @@ out:
struct page *alloc_migration_target(struct page *page, unsigned long private)
{
+ struct folio *folio = page_folio(page);
struct migration_target_control *mtc;
gfp_t gfp_mask;
unsigned int order = 0;
- struct page *new_page = NULL;
+ struct folio *new_folio = NULL;
int nid;
int zidx;
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
gfp_mask = mtc->gfp_mask;
nid = mtc->nid;
if (nid == NUMA_NO_NODE)
- nid = page_to_nid(page);
+ nid = folio_nid(folio);
- if (PageHuge(page)) {
- struct hstate *h = page_hstate(compound_head(page));
+ if (folio_test_hugetlb(folio)) {
+ struct hstate *h = page_hstate(&folio->page);
gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
}
- if (PageTransHuge(page)) {
+ if (folio_test_large(folio)) {
/*
* clear __GFP_RECLAIM to make the migration callback
* consistent with regular THP allocations.
*/
gfp_mask &= ~__GFP_RECLAIM;
gfp_mask |= GFP_TRANSHUGE;
- order = HPAGE_PMD_ORDER;
+ order = folio_order(folio);
}
- zidx = zone_idx(page_zone(page));
+ zidx = zone_idx(folio_zone(folio));
if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
gfp_mask |= __GFP_HIGHMEM;
- new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
- if (new_page && PageTransHuge(new_page))
- prep_transhuge_page(new_page);
+ new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
- return new_page;
+ return &new_folio->page;
}
#ifdef CONFIG_NUMA
@@ -1999,32 +1997,20 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
unsigned long data)
{
int nid = (int) data;
- struct page *newpage;
-
- newpage = __alloc_pages_node(nid,
- (GFP_HIGHUSER_MOVABLE |
- __GFP_THISNODE | __GFP_NOMEMALLOC |
- __GFP_NORETRY | __GFP_NOWARN) &
- ~__GFP_RECLAIM, 0);
-
- return newpage;
-}
-
-static struct page *alloc_misplaced_dst_page_thp(struct page *page,
- unsigned long data)
-{
- int nid = (int) data;
- struct page *newpage;
-
- newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
- HPAGE_PMD_ORDER);
- if (!newpage)
- goto out;
-
- prep_transhuge_page(newpage);
+ int order = compound_order(page);
+ gfp_t gfp = __GFP_THISNODE;
+ struct folio *new;
+
+ if (order > 0)
+ gfp |= GFP_TRANSHUGE_LIGHT;
+ else {
+ gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
+ __GFP_NOWARN;
+ gfp &= ~__GFP_RECLAIM;
+ }
+ new = __folio_alloc_node(gfp, order, nid);
-out:
- return newpage;
+ return &new->page;
}
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
@@ -2082,23 +2068,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
int nr_remaining;
unsigned int nr_succeeded;
LIST_HEAD(migratepages);
- new_page_t *new;
- bool compound;
int nr_pages = thp_nr_pages(page);
/*
- * PTE mapped THP or HugeTLB page can't reach here so the page could
- * be either base page or THP. And it must be head page if it is
- * THP.
- */
- compound = PageTransHuge(page);
-
- if (compound)
- new = alloc_misplaced_dst_page_thp;
- else
- new = alloc_misplaced_dst_page;
-
- /*
* Don't migrate file pages that are mapped in multiple processes
* with execute permissions as they are probably shared libraries.
*/
@@ -2118,9 +2090,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
goto out;
list_add(&page->lru, &migratepages);
- nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
- MIGRATE_ASYNC, MR_NUMA_MISPLACED,
- &nr_succeeded);
+ nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
+ NULL, node, MIGRATE_ASYNC,
+ MR_NUMA_MISPLACED, &nr_succeeded);
if (nr_remaining) {
if (!list_empty(&migratepages)) {
list_del(&page->lru);
diff --git a/mm/mremap.c b/mm/mremap.c
index 9d76da79594d..303d3290b938 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -486,6 +486,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
pmd_t *old_pmd, *new_pmd;
pud_t *old_pud, *new_pud;
+ if (!len)
+ return 0;
+
old_end = old_addr + len;
flush_cache_range(vma, old_addr, old_end);
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 1187f9c1ec5b..14a5cda73dee 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -163,7 +163,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return not_found(pvmw);
if (unlikely(is_vm_hugetlb_page(vma))) {
- unsigned long size = pvmw->nr_pages * PAGE_SIZE;
+ struct hstate *hstate = hstate_vma(vma);
+ unsigned long size = huge_page_size(hstate);
/* The only possible mapping was handled on last iteration */
if (pvmw->pte)
return not_found(pvmw);
@@ -173,8 +174,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (!pvmw->pte)
return false;
- pvmw->ptl = huge_pte_lockptr(size_to_hstate(size), mm,
- pvmw->pte);
+ pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
spin_lock(pvmw->ptl);
if (!check_pte(pvmw))
return not_found(pvmw);