Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	107
1 file changed, 83 insertions, 24 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 70394cabaf4e..bda20282746b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -215,8 +215,9 @@ enum node_stat_item {
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
- NR_SLAB_RECLAIMABLE,
- NR_SLAB_UNRECLAIMABLE,
+ NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
+ NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
+ * memcg_flush_percpu_vmstats() first. */
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
@@ -234,6 +235,8 @@ enum node_stat_item {
NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
+ NR_FILE_THPS,
+ NR_FILE_PMDMAPPED,
NR_ANON_THPS,
NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_VMSCAN_WRITE,
@@ -676,6 +679,14 @@ struct zonelist {
extern struct page *mem_map;
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct deferred_split {
+ spinlock_t split_queue_lock;
+ struct list_head split_queue;
+ unsigned long split_queue_len;
+};
+#endif
+
/*
* On NUMA machines, each NUMA node would have a pg_data_t to describe
its memory layout. On UMA machines there is a single pglist_data which
@@ -755,9 +766,7 @@ typedef struct pglist_data {
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- spinlock_t split_queue_lock;
- struct list_head split_queue;
- unsigned long split_queue_len;
+ struct deferred_split deferred_split_queue;
#endif
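
For illustration, a minimal sketch of how a THP could be queued on the new per-node structure; the helper name queue_deferred_split_sketch() is an assumption made up for this example (the real logic lives in deferred_split_huge_page() in mm/huge_memory.c), while NODE_DATA(), page_deferred_list() and the field names come from the kernel:

static void queue_deferred_split_sketch(struct page *page)
{
	struct deferred_split *ds =
		&NODE_DATA(page_to_nid(page))->deferred_split_queue;
	unsigned long flags;

	spin_lock_irqsave(&ds->split_queue_lock, flags);
	/* page_deferred_list() returns the list_head embedded in the
	 * compound page; only queue the page once */
	if (list_empty(page_deferred_list(page))) {
		list_add_tail(page_deferred_list(page), &ds->split_queue);
		ds->split_queue_len++;
	}
	spin_unlock_irqrestore(&ds->split_queue_lock, flags);
}

Bundling the lock, list and length into one struct presumably makes it easy to instantiate the same queue elsewhere without duplicating the three fields.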
/* Fields commonly accessed by the page reclaim scanner */
@@ -855,18 +864,6 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return zone_idx(zone) == ZONE_DEVICE;
-}
-#else
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return false;
-}
-#endif
-
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -1160,6 +1157,29 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
+#define SUBSECTION_SHIFT 21
+
+#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
+#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
+#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))
+
+#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
+#error Subsection size exceeds section size
+#else
+#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
+#endif
+
+#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
+#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
+
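
As a concrete check of the arithmetic, assuming the common x86_64 values PAGE_SHIFT = 12 and SECTION_SIZE_BITS = 27 (neither is fixed by this header):

/*
 * SUBSECTION_SHIFT        = 21               -> 2 MiB subsections
 * PFN_SUBSECTION_SHIFT    = 21 - 12 = 9
 * PAGES_PER_SUBSECTION    = 1UL << 9 = 512 pages
 * PAGE_SUBSECTION_MASK    = ~511UL
 * SUBSECTIONS_PER_SECTION = 1UL << (27 - 21) = 64
 */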
+struct mem_section_usage {
+ DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
+ /* See declaration of similar field in struct zone */
+ unsigned long pageblock_flags[0];
+};
+
+void subsection_map_init(unsigned long pfn, unsigned long nr_pages);
+
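
The subsection bitmap sits inline (64 bits fits in one unsigned long on 64-bit), while pageblock_flags is a flexible array sized at runtime. A hypothetical allocation sketch, where usemap_bytes stands in for the pageblock bitmap size that mm/sparse.c computes (see mem_section_usage_size() below):

static struct mem_section_usage *alloc_usage_sketch(size_t usemap_bytes)
{
	/* one allocation now covers both the subsection map and the
	 * pageblock flags that used to hang off a bare pointer */
	return kzalloc(sizeof(struct mem_section_usage) + usemap_bytes,
		       GFP_KERNEL);
}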
struct page;
struct page_ext;
struct mem_section {
@@ -1177,8 +1197,7 @@ struct mem_section {
*/
unsigned long section_mem_map;
- /* See declaration of similar field in struct zone */
- unsigned long *pageblock_flags;
+ struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
/*
* If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
@@ -1209,6 +1228,11 @@ extern struct mem_section **mem_section;
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
+static inline unsigned long *section_to_usemap(struct mem_section *ms)
+{
+ return ms->usage->pageblock_flags;
+}
+
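
Callers that previously dereferenced ms->pageblock_flags directly are expected to go through this accessor; an illustrative lookup (the helper name is hypothetical):

static inline unsigned long *usemap_for_pfn_sketch(unsigned long pfn)
{
	return section_to_usemap(__pfn_to_section(pfn));
}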
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
@@ -1219,8 +1243,8 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
return NULL;
return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
-extern int __section_nr(struct mem_section* ms);
-extern unsigned long usemap_size(void);
+extern unsigned long __section_nr(struct mem_section *ms);
+extern size_t mem_section_usage_size(void);
/*
* We use the lower bits of the mem_map pointer to store
@@ -1238,7 +1262,8 @@ extern unsigned long usemap_size(void);
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_IS_ONLINE (1UL<<2)
-#define SECTION_MAP_LAST_BIT (1UL<<3)
+#define SECTION_IS_EARLY (1UL<<3)
+#define SECTION_MAP_LAST_BIT (1UL<<4)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT 3
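
Spelled out, the low bits of section_mem_map now carry four flags, so for example a boot-time section that is present, backed by a mem_map and online has bits 0, 1, 2 and 3 all set:

/*
 * SECTION_MARKED_PRESENT = 0x1
 * SECTION_HAS_MEM_MAP    = 0x2
 * SECTION_IS_ONLINE      = 0x4
 * SECTION_IS_EARLY       = 0x8
 * SECTION_MAP_MASK       = ~(0x10 - 1) = ~0xf, leaving the encoded
 * mem_map pointer in the remaining high bits of section_mem_map.
 */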
@@ -1264,6 +1289,11 @@ static inline int valid_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
+static inline int early_section(struct mem_section *section)
+{
+ return (section && (section->section_mem_map & SECTION_IS_EARLY));
+}
+
static inline int valid_section_nr(unsigned long nr)
{
return valid_section(__nr_to_section(nr));
@@ -1291,14 +1321,42 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
return __nr_to_section(pfn_to_section_nr(pfn));
}
-extern int __highest_present_section_nr;
+extern unsigned long __highest_present_section_nr;
+
+static inline int subsection_map_index(unsigned long pfn)
+{
+ return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
+}
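
With the x86_64 values assumed earlier (32768 pages per section, 512 per subsection), the index is simply the pfn's offset within its section divided by the subsection size, for instance:

/*
 * pfn = 0x48200:
 *   offset in section    = 0x48200 & ~PAGE_SECTION_MASK = 0x200 = 512
 *   subsection_map_index = 512 / 512 = 1
 */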
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+ int idx = subsection_map_index(pfn);
+
+ return test_bit(idx, ms->usage->subsection_map);
+}
+#else
+static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
+{
+ return 1;
+}
+#endif
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
+ struct mem_section *ms;
+
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ ms = __nr_to_section(pfn_to_section_nr(pfn));
+ if (!valid_section(ms))
+ return 0;
+ /*
+ * Traditionally early sections always returned pfn_valid() for
+ * the entire section-sized span.
+ */
+ return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif
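
The practical effect shows up on hotplugged (non-early) sections, where only subsections populated through the hotplug path report valid pfns. A hypothetical probe over a range, stepping one subsection at a time since validity can only change at subsection boundaries:

static bool range_fully_valid_sketch(unsigned long start_pfn,
				     unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = SUBSECTION_ALIGN_DOWN(start_pfn);
	     pfn < start_pfn + nr_pages; pfn += PAGES_PER_SUBSECTION)
		if (!pfn_valid(pfn))
			return false;
	return true;
}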
@@ -1330,6 +1388,7 @@ void sparse_init(void);
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
#define pfn_present pfn_valid
+#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
/*