author     Marek Szyprowski <m.szyprowski@samsung.com>  2012-01-25 12:49:24 +0100
committer  Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-21 15:09:36 +0200
commit     49f223a9cd96c7293d7258ff88c2bdf83065f69c (patch)
tree       4a141cbe4132ab2a5edfbc44165d091bb2289c75 /include/linux/mmzone.h
parent     mm: extract reclaim code from __alloc_pages_direct_reclaim() (diff)
mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks
alloc_contig_range() performs memory allocation, so it should also keep the memory watermarks at the correct level. This commit adds a call to a *_slowpath-style reclaim to grab enough pages to make sure that the final collection of contiguous pages from the freelists will not starve the system.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
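In outline, the reclaim side of this change (which lives in mm/page_alloc.c and is therefore not visible in this mmzone.h diffstat) follows the allocator slowpath pattern: temporarily raise the zone's watermarks by the number of pages about to be taken, reclaim until the zone is back above its low watermark, then restore the watermarks. The sketch below illustrates only that loop; __update_cma_watermarks() is an assumed helper name (its job, adjusting the new per-zone counter, is sketched after the struct zone hunk below), and error handling is omitted.

/* Sketch only -- not the code from this commit.  Stabilise watermarks
 * around a contiguous allocation by reclaiming until the zone is back
 * above its (temporarily raised) low watermark.
 */
static void __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
{
	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);

	/* Raise the watermarks by the number of pages we are about to take. */
	__update_cma_watermarks(zone, count);

	/*
	 * Reclaim until the zone satisfies its raised low watermark again.
	 * A real implementation would also wake kswapd and bail out (or
	 * invoke the OOM killer) when reclaim stops making progress.
	 */
	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0))
		try_to_free_pages(zonelist, 0, gfp_mask, NULL);

	/* Restore the original watermark levels. */
	__update_cma_watermarks(zone, -count);
}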
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8c1335f3c3a3..26f2040b8b04 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,8 +63,10 @@ enum {
 
 #ifdef CONFIG_CMA
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone) zone->min_cma_pages
 #else
 # define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
 #endif
 
 #define for_each_migratetype_order(order, type) \
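cma_wmark_pages() is defined to evaluate to 0 when CONFIG_CMA is disabled, so watermark calculations can add it unconditionally without extra #ifdefs at the call sites. The real call site is in mm/page_alloc.c and is not part of this diffstat; a hedged illustration of the pattern, with a made-up wrapper name:

/* Sketch: bias an existing watermark check by the pages CMA has
 * temporarily reserved.  With CONFIG_CMA disabled this adds 0 and
 * the check is unchanged.
 */
static bool zone_watermark_ok_cma(struct zone *z, int order,
				  unsigned long mark, int classzone_idx,
				  int alloc_flags)
{
	return zone_watermark_ok(z, order, mark + cma_wmark_pages(z),
				 classzone_idx, alloc_flags);
}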
@@ -371,6 +373,13 @@ struct zone {
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
 #endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long		min_cma_pages;
+#endif
 	struct free_area	free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
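The new min_cma_pages field is the amount by which the zone's watermarks are temporarily raised while alloc_contig_range() pulls pages out of the zone; it is read back through cma_wmark_pages() above. A hedged sketch of a helper that could maintain it (the helper name is assumed here, and the actual update logic lives in mm/page_alloc.c, outside this diffstat):

/* Sketch: adjust the per-zone CMA bias under the zone lock and let the
 * generic code recompute the min/low/high watermarks from it.
 */
static void __update_cma_watermarks(struct zone *zone, int count)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	zone->min_cma_pages += count;	/* picked up via cma_wmark_pages() */
	spin_unlock_irqrestore(&zone->lock, flags);

	setup_per_zone_wmarks();	/* refresh the zone watermarks */
}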