Diffstat (limited to 'include/linux/mmzone.h')
 include/linux/mmzone.h | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 54d74f6eb233..754c25966a0a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -762,6 +762,14 @@ typedef struct pglist_data {
 	/* Number of pages migrated during the rate limiting time interval */
 	unsigned long numabalancing_migrate_nr_pages;
 #endif
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+	/*
+	 * If memory initialisation on large machines is deferred then this
+	 * is the first PFN that needs to be initialised.
+	 */
+	unsigned long first_deferred_pfn;
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 } pg_data_t;
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
@@ -1216,11 +1224,16 @@ void sparse_init(void);
 #define sparse_index_init(_sec, _nid)  do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-bool early_pfn_in_nid(unsigned long pfn, int nid);
-#else
-#define early_pfn_in_nid(pfn, nid)	(1)
-#endif
+/*
+ * During memory init memblocks map pfns to nids. The search is expensive and
+ * this caches recent lookups. The implementation of __early_pfn_to_nid
+ * may treat start/end as pfns or sections.
+ */
+struct mminit_pfnnid_cache {
+	unsigned long last_start;
+	unsigned long last_end;
+	int last_nid;
+};
 
 #ifndef early_pfn_valid
 #define early_pfn_valid(pfn)	(1)
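
The mminit_pfnnid_cache added by the second hunk is a one-entry cache sitting in front of the expensive memblock pfn-to-nid search. Below is a minimal userspace sketch (not kernel code) of how such a cache can short-circuit repeated lookups while pfns are walked in order; the region table and the helpers slow_pfn_to_nid()/cached_pfn_to_nid() are illustrative assumptions, not the kernel's __early_pfn_to_nid() implementation.

/*
 * Userspace sketch only: models the caching idea behind
 * struct mminit_pfnnid_cache.  The region table below is a
 * hypothetical stand-in for memblock's per-node physical ranges.
 */
#include <stdio.h>

struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

struct region { unsigned long start, end; int nid; };

static const struct region regions[] = {
	{ 0x00000, 0x08000, 0 },	/* pfns [0x0000, 0x8000) on node 0 */
	{ 0x08000, 0x10000, 1 },	/* pfns [0x8000, 0x10000) on node 1 */
};

/* Slow path: linear search of the region table, remembering the hit range. */
static int slow_pfn_to_nid(unsigned long pfn, struct mminit_pfnnid_cache *cache)
{
	for (unsigned long i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		if (pfn >= regions[i].start && pfn < regions[i].end) {
			cache->last_start = regions[i].start;
			cache->last_end = regions[i].end;
			cache->last_nid = regions[i].nid;
			return cache->last_nid;
		}
	}
	return -1;
}

/* Fast path: reuse the last range if the pfn still falls inside it. */
static int cached_pfn_to_nid(unsigned long pfn, struct mminit_pfnnid_cache *cache)
{
	if (pfn >= cache->last_start && pfn < cache->last_end)
		return cache->last_nid;
	return slow_pfn_to_nid(pfn, cache);
}

int main(void)
{
	struct mminit_pfnnid_cache cache = { 0, 0, -1 };
	unsigned long pfn;

	/* Walking pfns in order: only the first lookup per range is slow. */
	for (pfn = 0x7ffe; pfn <= 0x8001; pfn++)
		printf("pfn %#lx -> nid %d\n", pfn, cached_pfn_to_nid(pfn, &cache));
	return 0;
}

Because early memory initialisation tends to walk pfns sequentially, a single remembered [start, end) range is usually enough for consecutive lookups to hit the cache instead of repeating the linear search.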
