Diffstat (limited to 'include/linux/hugetlb.h')
 include/linux/hugetlb.h | 61 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 52 insertions(+), 9 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3c0117656745..1faebe1cd0ed 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -29,12 +29,29 @@ typedef struct { unsigned long pd; } hugepd_t;
#include <linux/shm.h>
#include <asm/tlbflush.h>
+/*
+ * For a HugeTLB page, there is more metadata to save in the struct pages
+ * than the head struct page can hold, so we have to reuse tail struct
+ * pages to store it. To avoid conflicts caused by subsequent use of more
+ * tail struct pages, we gather the discrete indexes of the tail pages
+ * used here.
+ */
+enum {
+ SUBPAGE_INDEX_SUBPOOL = 1, /* reuse page->private */
+#ifdef CONFIG_CGROUP_HUGETLB
+ SUBPAGE_INDEX_CGROUP, /* reuse page->private */
+ SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */
+ __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
+#endif
+ __NR_USED_SUBPAGE,
+};
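The enum replaces magic offsets like "hpage + 1" with named indexes. As a
rough sketch of the pattern the rest of the patch follows (the helper names
below are illustrative, not part of the patch):

/* Illustrative only: generic accessors for metadata kept in the
 * page->private field of the tail page at the given index. */
static inline void *hugetlb_tail_private(struct page *hpage,
					 unsigned int index)
{
	return (void *)page_private(hpage + index);
}

static inline void hugetlb_set_tail_private(struct page *hpage,
					    unsigned int index,
					    void *data)
{
	set_page_private(hpage + index, (unsigned long)data);
}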
+
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages; /* Maximum huge pages or -1 if no maximum. */
long used_hpages; /* Used count against maximum, includes */
- /* both alloced and reserved pages. */
+ /* both allocated and reserved pages. */
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
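For context, used_hpages is charged for both faulted-in and merely reserved
pages. A simplified sketch of how a charge against max_hpages might look,
ignoring the min_hpages/rsv_hpages interaction (the helper name is assumed,
not from this header):

/* Simplified sketch: charge 'delta' pages against the subpool maximum.
 * Returns 0 on success, -ENOMEM if the cap would be exceeded. */
static long subpool_charge(struct hugepage_subpool *spool, long delta)
{
	long ret = 0;

	spin_lock(&spool->lock);
	if (spool->max_hpages != -1 &&
	    spool->used_hpages + delta > spool->max_hpages)
		ret = -ENOMEM;
	else
		spool->used_hpages += delta;
	spin_unlock(&spool->lock);
	return ret;
}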
@@ -68,7 +85,7 @@ struct resv_map {
* by a resv_map's lock. The set of regions within the resv_map represent
* reservations for huge pages, or huge pages that have already been
* instantiated within the map. The from and to elements are huge page
- * indicies into the associated mapping. from indicates the starting index
+ * indices into the associated mapping. from indicates the starting index
* of the region. to represents the first index past the end of the region.
*
* For example, a file region structure with from == 0 and to == 4 represents
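A small sketch of the interval convention described above (the layout here
is abbreviated; the real file_region structure in the hugetlb code carries
more fields):

/* Abbreviated sketch of a reserve-map region: [from, to) in units of
 * huge-page indexes within the mapping. */
struct file_region_example {
	struct list_head link;	/* linked into the resv_map's region list */
	long from;		/* first huge-page index in the region */
	long to;		/* one past the last index */
};

/* from == 0, to == 4: covers huge-page indexes 0, 1, 2 and 3. */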
@@ -451,7 +468,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
- struct user_struct **user, int creat_flags,
+ struct ucounts **ucounts, int creat_flags,
int page_size_log);
static inline bool is_file_hugepages(struct file *file)
@@ -471,7 +488,7 @@ static inline struct hstate *hstate_inode(struct inode *i)
#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct user_struct **user, int creat_flags,
+ struct ucounts **ucounts, int creat_flags,
int page_size_log)
{
return ERR_PTR(-ENOSYS);
@@ -515,12 +532,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* modifications require hugetlb_lock.
* HPG_freed - Set when page is on the free lists.
* Synchronization: hugetlb_lock held for examination and modification.
+ * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
*/
enum hugetlb_page_flags {
HPG_restore_reserve = 0,
HPG_migratable,
HPG_temporary,
HPG_freed,
+ HPG_vmemmap_optimized,
__NR_HPAGEFLAGS,
};
@@ -566,6 +585,7 @@ HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
+HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
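Each HPAGEFLAG() invocation generates test/set/clear helpers; approximately
(a sketch from the surrounding definitions, the generating macros live
earlier in this header):

/* What HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) expands to,
 * roughly: the flag bits live in the head page's page->private. */
static inline int HPageVmemmapOptimized(struct page *page)
{
	return test_bit(HPG_vmemmap_optimized, &page->private);
}

static inline void SetHPageVmemmapOptimized(struct page *page)
{
	set_bit(HPG_vmemmap_optimized, &page->private);
}

static inline void ClearHPageVmemmapOptimized(struct page *page)
{
	clear_bit(HPG_vmemmap_optimized, &page->private);
}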
#ifdef CONFIG_HUGETLB_PAGE
@@ -588,6 +608,9 @@ struct hstate {
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+ unsigned int nr_free_vmemmap_pages;
+#endif
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
struct cftype cgroup_files_dfl[7];
@@ -635,13 +658,13 @@ extern unsigned int default_hstate_idx;
*/
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
- return (struct hugepage_subpool *)(hpage+1)->private;
+ return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}
static inline void hugetlb_set_page_subpool(struct page *hpage,
struct hugepage_subpool *subpool)
{
- set_page_private(hpage+1, (unsigned long)subpool);
+ set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}
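The cgroup indexes from the enum are consumed the same way; a hypothetical
accessor following the identical pattern (the real ones live in the hugetlb
cgroup header, the name here is illustrative):

/* Illustrative: the SUBPAGE_INDEX_CGROUP slot would be read like this. */
static inline struct hugetlb_cgroup *
example_hugetlb_cgroup(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_CGROUP);
}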
static inline struct hstate *hstate_file(struct file *f)
@@ -718,8 +741,8 @@ static inline void arch_clear_hugepage_flags(struct page *page) { }
#endif
#ifndef arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
+ vm_flags_t flags)
{
return entry;
}
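With the new signature an architecture override can key off the page-size
shift and vm_flags alone, with no vma or page in hand. A hypothetical
sketch (the condition and PTE tweak are invented for illustration):

/* Hypothetical arch override: pick PTE encoding from the shift. */
#define arch_make_huge_pte arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	if (shift == PUD_SHIFT)
		entry = pte_mkhuge(entry);	/* stand-in for an arch bit */
	return entry;
}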
@@ -774,7 +797,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
* It determines whether or not a huge page should be placed on
* movable zone or not. Movability of any huge page should be
* required only if huge page size is supported for migration.
- * There wont be any reason for the huge page to be movable if
+ * There won't be any reason for the huge page to be movable if
* it is not migratable to start with. Also the size of the huge
* page should be large enough to be placed under a movable zone
* and still feasible enough to be migratable. Just the presence
@@ -835,6 +858,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->hugetlb_usage, 0);
+}
+
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
atomic_long_add(l, &mm->hugetlb_usage);
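hugetlb_count_init() is meant to run when a new mm is set up, so that a
forked child does not start with the parent's counter value. A simplified
sketch of the intended call site (the function name below is illustrative):

/* Sketch: zero the counter during mm setup; without this, a child mm
 * could inherit a stale hugetlb_usage copied from its parent. */
static void example_mm_setup(struct mm_struct *mm)
{
	/* ... other mm initialization ... */
	hugetlb_count_init(mm);
}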
@@ -875,6 +903,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
+static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+{
+ return NULL;
+}
+
static inline int isolate_or_dissolve_huge_page(struct page *page,
struct list_head *list)
{
@@ -1014,6 +1047,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+}
+
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
@@ -1028,6 +1065,12 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
}
#endif /* CONFIG_HUGETLB_PAGE */
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+extern bool hugetlb_free_vmemmap_enabled;
+#else
+#define hugetlb_free_vmemmap_enabled false
+#endif
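Because the !CONFIG_HUGETLB_PAGE_FREE_VMEMMAP fallback is a plain
"false" define, callers can test the flag unconditionally and let the
compiler discard the dead branch. An illustrative check (helper name
assumed):

/* Illustrative caller: avoid touching the part of the vmemmap that
 * has been freed when the optimization is active. */
static inline bool example_can_touch_tail_vmemmap(void)
{
	return !hugetlb_free_vmemmap_enabled;
}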
+
static inline spinlock_t *huge_pte_lock(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{