 include/linux/hugetlb.h | 127 ++++++++++++++++++++++++++++++---------------
 1 file changed, 92 insertions(+), 35 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b857fc8cc2ec..0ed8e41aaf11 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -14,6 +14,30 @@ struct ctl_table;
struct user_struct;
struct mmu_gather;
+#ifndef is_hugepd
+/*
+ * Some architectures require a hugepage directory format to support
+ * multiple hugepage sizes. For example,
+ * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
+ * introduced such a format on powerpc, allowing a more flexible
+ * hugepage pagetable layout.
+ */
+typedef struct { unsigned long pd; } hugepd_t;
+#define is_hugepd(hugepd) (0)
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ return 0;
+}
+#else
+extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr);
+#endif
+
+
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
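[Note: the hugepd_t fallback above is moved ahead of the CONFIG_HUGETLB_PAGE block so that hugepd_t, is_hugepd() and gup_huge_pd() are visible to generic page-table walkers on every architecture; where no hugepage directory format exists, is_hugepd() is constant 0 and the stub compiles away. A minimal sketch of how a walker can use these hooks; the walker function itself is invented, not the kernel's actual gup code:

    /* Hypothetical PGD-level step of a generic get_user_pages() walker.
     * With the fallback above, is_hugepd() folds to 0 and the whole
     * branch is eliminated on architectures without hugepage directories.
     */
    static int walk_pgd_entry(pgd_t pgd, unsigned long addr, unsigned long end,
                              int write, struct page **pages, int *nr)
    {
            if (is_hugepd(__hugepd(pgd_val(pgd))))
                    return gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
                                       PGDIR_SHIFT, end, write, pages, nr);
            /* ...otherwise descend to the PUD level as usual... */
            return 0;
    }
]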
@@ -92,7 +116,6 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
@@ -113,19 +136,27 @@ extern struct list_head huge_boot_pages;
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
+struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
+struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+ pgd_t *pgd, int flags);
+
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
+bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
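[Note: huge_pte_offset() now takes the mapping size, so architectures that place huge PTEs at more than one page-table level can walk to the correct level instead of guessing. A hedged example of a call site after this change; the wrapper function is invented, while hstate_vma(), huge_page_mask() and huge_page_size() are existing hugetlb.h helpers:

    /* Hypothetical lookup of the huge PTE backing addr in a hugetlb VMA. */
    static pte_t *lookup_huge_pte(struct vm_area_struct *vma, unsigned long addr)
    {
            struct hstate *h = hstate_vma(vma);

            return huge_pte_offset(vma->vm_mm, addr & huge_page_mask(h),
                                   huge_page_size(h));
    }
]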
@@ -147,8 +178,10 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
static inline void hugetlb_show_meminfo(void)
{
}
+#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
+#define follow_huge_pgd(mm, addr, pgd, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
@@ -157,11 +190,7 @@ static inline void hugetlb_show_meminfo(void)
#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
src_addr, pagep) ({ BUG(); 0; })
-#define huge_pte_offset(mm, address) 0
-static inline int dequeue_hwpoisoned_huge_page(struct page *page)
-{
- return 0;
-}
+#define huge_pte_offset(mm, address, sz) 0
static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
@@ -217,29 +246,6 @@ static inline int pud_write(pud_t pud)
}
#endif
-#ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
-typedef struct { unsigned long pd; } hugepd_t;
-#define is_hugepd(hugepd) (0)
-#define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr);
-#endif
-
#define HUGETLB_ANON_FILE "anon_hugepage"
enum {
@@ -262,6 +268,9 @@ struct hugetlbfs_sb_info {
spinlock_t stat_lock;
struct hstate *hstate;
struct hugepage_subpool *spool;
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
};
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
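[Note: caching uid, gid and mode in hugetlbfs_sb_info lets the filesystem report its mount options later, e.g. from a show_options method. A sketch of such a consumer, using the standard seq_file and kuid helpers; the function body below is illustrative and not part of this header diff:

    /* Hypothetical hugetlbfs show_options using the new cached fields. */
    static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
    {
            struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);

            if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
                    seq_printf(m, ",uid=%u",
                               from_kuid_munged(&init_user_ns, sbinfo->uid));
            if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
                    seq_printf(m, ",gid=%u",
                               from_kgid_munged(&init_user_ns, sbinfo->gid));
            if (sbinfo->mode != 0755)
                    seq_printf(m, ",mode=%o", sbinfo->mode);
            return 0;
    }
]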
@@ -343,6 +352,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
+struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
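[Note: alloc_huge_page_nodemask() gives callers a preferred node plus an allowed-node mask, the combination page migration wants when the destination node is only a preference. A hedged sketch of a migration-target allocator built on it; the helper is invented:

    /* Hypothetical allocator for the target page of a hugetlb migration:
     * prefer the source page's node, fall back to any node with memory.
     */
    static struct page *new_hugetlb_target(struct page *page)
    {
            struct hstate *h = page_hstate(compound_head(page));

            return alloc_huge_page_nodemask(h, page_to_nid(page),
                                            &node_states[N_MEMORY]);
    }
]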
@@ -461,12 +472,17 @@ static inline pgoff_t basepage_index(struct page *page)
return __basepage_index(page);
}
+extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
- return huge_page_shift(h) == PMD_SHIFT;
+ if ((huge_page_shift(h) == PMD_SHIFT) ||
+ (huge_page_shift(h) == PGDIR_SHIFT))
+ return true;
+ else
+ return false;
#else
return false;
#endif
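[Note: dissolve_free_huge_page() is now exported on its own, so memory-failure handling can free exactly one poisoned huge page instead of sweeping a PFN range, and hugepage_migration_supported() additionally accepts PGDIR_SHIFT-sized pages (e.g. 16GB pages on powerpc). A hedged sketch; the wrapper and its error policy are invented:

    /* Hypothetical: release a free huge page back to the allocator,
     * e.g. after a hardware poison hit, if its size class is handled.
     */
    static int try_dissolve_huge_page(struct page *page)
    {
            struct hstate *h = page_hstate(page);

            if (!hugepage_migration_supported(h))
                    return -EBUSY;

            /* Returns 0 on success, nonzero if the page was not free. */
            return dissolve_free_huge_page(page);
    }
]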
@@ -501,10 +517,19 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
atomic_long_sub(l, &mm->hugetlb_usage);
}
+
+#ifndef set_huge_swap_pte_at
+static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+ set_huge_pte_at(mm, addr, ptep, pte);
+}
+#endif
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
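[Note: set_huge_swap_pte_at() exists because on some architectures a single huge mapping is backed by multiple contiguous page-table entries, and a swap or migration entry must be replicated across all of them; the sz argument tells the architecture how many. The generic version earlier in this hunk simply forwards to set_huge_pte_at(). A hedged sketch of the kind of caller it serves; the helper is invented and assumes <linux/swapops.h>:

    /* Hypothetical: replace a huge PTE with a migration entry while the
     * page is being moved; sz lets arch code cover every backing entry.
     */
    static void install_huge_migration_entry(struct mm_struct *mm,
                                             unsigned long addr, pte_t *ptep,
                                             struct page *page, struct hstate *h)
    {
            swp_entry_t entry = make_migration_entry(page, 0);

            set_huge_swap_pte_at(mm, addr, ptep, swp_entry_to_pte(entry),
                                 huge_page_size(h));
    }
]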
@@ -518,19 +543,46 @@ struct hstate {};
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
+static inline bool hstate_is_gigantic(struct hstate *h)
+{
+ return false;
+}
+
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
return 1;
}
-#define hstate_index_to_shift(index) 0
-#define hstate_index(h) 0
+
+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+ return 0;
+}
+
+static inline int hstate_index(struct hstate *h)
+{
+ return 0;
+}
static inline pgoff_t basepage_index(struct page *page)
{
return page->index;
}
-#define dissolve_free_huge_pages(s, e) 0
-#define hugepage_migration_supported(h) false
+
+static inline int dissolve_free_huge_page(struct page *page)
+{
+ return 0;
+}
+
+static inline int dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ return 0;
+}
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+ return false;
+}
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
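[Note: several of the !CONFIG_HUGETLB_PAGE stubs above are converted from macros to static inlines, which keeps argument type checking and avoids set-but-unused warnings when hugetlb is configured out. A standalone illustration of the difference; not kernel code:

    struct hstate;

    /* Macro stub: the argument is never evaluated, so any expression,
     * even a nonsensical one, compiles without complaint.
     */
    #define hstate_index_as_macro(h)        0

    /* Inline stub: the compiler still checks that h really is a
     * struct hstate *, and h counts as used at the call site.
     */
    static inline int hstate_index_as_inline(struct hstate *h)
    {
            return 0;
    }
]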
@@ -545,6 +597,11 @@ static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
+
+static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,