author	Matthew Wilcox (Oracle) <willy@infradead.org>	2020-12-15 22:55:54 -0500
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2021-10-18 07:49:40 -0400
commit	cc09cb134124a42fbe3bdcebefdc54e286d8f3e5 (patch)
tree	9b4d5703a69507091b2c689a1f111088c19592ec /mm/mempolicy.c
parent	mm/lru: Add folio_add_lru() (diff)
mm/page_alloc: Add folio allocation functions
The __folio_alloc(), __folio_alloc_node() and folio_alloc() functions are mostly for type safety, but they also ensure that the page allocator allocates a compound page and initialises the deferred list if the page is large enough to have one.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1592b081c58e..251df91ddc80 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2202,6 +2202,16 @@ struct page *alloc_pages(gfp_t gfp, unsigned order)
 }
 EXPORT_SYMBOL(alloc_pages);
 
+struct folio *folio_alloc(gfp_t gfp, unsigned order)
+{
+	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+
+	if (page && order > 1)
+		prep_transhuge_page(page);
+	return (struct folio *)page;
+}
+EXPORT_SYMBOL(folio_alloc);
+
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
 	struct mempolicy *pol = mpol_dup(vma_policy(src));
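
A hedged usage sketch, not part of the patch above: a caller that wants a multi-page allocation can request it as a folio and drop it again with folio_put(). The example function name is made up for illustration; folio_put() and folio_nr_pages() are other helpers from the same folio series and are assumed to be available.

/*
 * Illustrative only: allocate an order-2 folio with the new
 * folio_alloc() helper and release it again.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/printk.h>

static int folio_alloc_example(void)
{
	/* __GFP_COMP is added internally by folio_alloc() */
	struct folio *folio = folio_alloc(GFP_KERNEL, 2);

	if (!folio)
		return -ENOMEM;

	/* An order-2 folio spans four base pages */
	pr_info("allocated a folio of %ld pages\n", folio_nr_pages(folio));

	/* Drop the reference taken at allocation, freeing the folio */
	folio_put(folio);
	return 0;
}

folio_alloc() mirrors alloc_pages() but returns a struct folio *, so the type itself tells callers they hold a head (or single) page rather than a tail page, which is the type-safety point made in the commit message.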