author     Mel Gorman <mgorman@techsingularity.net>  2021-04-29 23:01:48 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-04-30 11:20:43 -0700
commit     0f87d9d30f21390dd71114f30e63038980e6eb3f
tree       af3d4e1e8f488534cd51fb644ca15f805bfd4127  /mm/page_alloc.c
parent     mm/page_alloc: add a bulk page allocator
mm/page_alloc: add an array-based interface to the bulk page allocator
The proposed callers for the bulk allocator store pages from the bulk
allocator in an array.  This patch adds an array-based interface to the
API to avoid multiple list iterations.  The page list interface is
preserved to avoid requiring all users of the bulk API to allocate and
manage enough storage to store the pages.

[akpm@linux-foundation.org: remove now unused local `allocated']

Link: https://lkml.kernel.org/r/20210325114228.27719-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Alexander Lobakin <alobakin@pm.me>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Duyck <alexander.duyck@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: David Miller <davem@davemloft.net>
Cc: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
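For illustration only (this sketch is not part of the patch), the two call
forms a user of the bulk allocator has after this change, assuming
GFP_KERNEL allocations from the local node; exactly one of
page_list/page_array is passed per call:

	LIST_HEAD(page_list);
	struct page *page_array[8] = { NULL };
	unsigned long nr;

	/* List interface (pre-existing): pages are chained onto page_list. */
	nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL, 8,
				&page_list, NULL);

	/* Array interface (added here): pages fill the NULL slots of page_array. */
	nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL,
				ARRAY_SIZE(page_array), NULL, page_array);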
Diffstat
-rw-r--r--  mm/page_alloc.c  |  60
1 file changed, 44 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d8209d48a543..e240704b5d39 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5007,21 +5007,29 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
}
/*
- * __alloc_pages_bulk - Allocate a number of order-0 pages to a list
+ * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
* @gfp: GFP flags for the allocation
* @preferred_nid: The preferred NUMA node ID to allocate from
* @nodemask: Set of nodes to allocate from, may be NULL
- * @nr_pages: The number of pages desired on the list
- * @page_list: List to store the allocated pages
+ * @nr_pages: The number of pages desired on the list or array
+ * @page_list: Optional list to store the allocated pages
+ * @page_array: Optional array to store the pages
*
* This is a batched version of the page allocator that attempts to
- * allocate nr_pages quickly and add them to a list.
+ * allocate nr_pages quickly. Pages are added to page_list if page_list
+ * is not NULL, otherwise it is assumed that the page_array is valid.
*
- * Returns the number of pages on the list.
+ * For lists, nr_pages is the number of pages that should be allocated.
+ *
+ * For arrays, only NULL elements are populated with pages and nr_pages
+ * is the maximum number of pages that will be stored in the array.
+ *
+ * Returns the number of pages on the list or array.
*/
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
- struct list_head *page_list)
+ struct list_head *page_list,
+ struct page **page_array)
{
struct page *page;
unsigned long flags;
@@ -5032,13 +5040,20 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
struct alloc_context ac;
gfp_t alloc_gfp;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
- int allocated = 0;
+ int nr_populated = 0;
if (WARN_ON_ONCE(nr_pages <= 0))
return 0;
+ /*
+ * Skip populated array elements to determine if any pages need
+ * to be allocated before disabling IRQs.
+ */
+ while (page_array && page_array[nr_populated] && nr_populated < nr_pages)
+ nr_populated++;
+
/* Use the single page allocator for one page. */
- if (nr_pages == 1)
+ if (nr_pages - nr_populated == 1)
goto failed;
/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
@@ -5082,12 +5097,19 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
pcp = &this_cpu_ptr(zone->pageset)->pcp;
pcp_list = &pcp->lists[ac.migratetype];
- while (allocated < nr_pages) {
+ while (nr_populated < nr_pages) {
+
+ /* Skip existing pages */
+ if (page_array && page_array[nr_populated]) {
+ nr_populated++;
+ continue;
+ }
+
page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
pcp, pcp_list);
if (!page) {
/* Try and get at least one page */
- if (!allocated)
+ if (!nr_populated)
goto failed_irq;
break;
}
@@ -5102,13 +5124,16 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
zone_statistics(ac.preferred_zoneref->zone, zone);
prep_new_page(page, 0, gfp, 0);
- list_add(&page->lru, page_list);
- allocated++;
+ if (page_list)
+ list_add(&page->lru, page_list);
+ else
+ page_array[nr_populated] = page;
+ nr_populated++;
}
local_irq_restore(flags);
- return allocated;
+ return nr_populated;
failed_irq:
local_irq_restore(flags);
@@ -5116,11 +5141,14 @@ failed_irq:
failed:
page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
if (page) {
- list_add(&page->lru, page_list);
- allocated = 1;
+ if (page_list)
+ list_add(&page->lru, page_list);
+ else
+ page_array[nr_populated] = page;
+ nr_populated++;
}
- return allocated;
+ return nr_populated;
}
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
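
As a usage note (again a sketch, not taken from the patch): because only
NULL array elements are populated, a caller can keep a long-lived array
and top it up after consuming some entries; the return value is the total
number of populated slots, not just the newly allocated pages.

	/* Consume the page in slot i, then mark the slot empty again. */
	consume_page(page_array[i]);	/* hypothetical consumer */
	page_array[i] = NULL;

	/* Refill: non-NULL slots are skipped, only the holes are filled. */
	nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL,
				ARRAY_SIZE(page_array), NULL, page_array);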