author	Vineet Gupta <vgupta@kernel.org>	2021-08-12 12:54:43 -0700
committer	Vineet Gupta <vgupta@kernel.org>	2021-08-26 13:42:42 -0700
commit	d9820ff76f95fa26d33e412254a89cd65b23142d (patch)
tree	4f7fcc648a3b08f5533923b05abbd00c29879b6d /arch/arc/include
parent	ARC: mm: hack to allow 2 level build with 4 level code (diff)
ARC: mm: switch pgtable_t back to struct page *
So far ARC pgtable_t has not been struct page based, to avoid the extra page_address() calls involved. However, the differences are down to noise and get in the way of using generic code, hence this patch.

This also allows us to reuse the generic THP deposit/withdraw code.

There is an additional consideration for PGDIR_SHIFT in the 4K page config: with page tables now only PAGE_SIZE deep, the address split can't really be arbitrary.

Tested-by: kernel test robot <lkp@intel.com>
Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
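For context on the deposit/withdraw reuse: with pgtable_t as a struct page *, ARC can fall back to the generic helpers in mm/pgtable-generic.c, which chain the deposited pte page off the pmd via page->lru. The following is a simplified sketch of that mechanism, not the verbatim kernel source and not part of this patch:

/* Sketch of the generic deposit/withdraw fallback that a struct-page
 * based pgtable_t enables (simplified from mm/pgtable-generic.c).
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO: chain the preallocated pte page off the pmd's huge-pte slot */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable = pmd_huge_pte(mm, pmdp);

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* pop the head; the next list entry (if any) becomes the new head */
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}

The key point is that the list_head lives inside struct page, which is why a pte_t *-based pgtable_t could not use these helpers and needed the ARC-private variants removed from asm/hugepage.h below.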
Diffstat (limited to 'arch/arc/include')
-rw-r--r--	arch/arc/include/asm/hugepage.h	| 8
-rw-r--r--	arch/arc/include/asm/page.h	| 2
-rw-r--r--	arch/arc/include/asm/pgalloc.h	| 57
-rw-r--r--	arch/arc/include/asm/pgtable-levels.h	| 8
4 files changed, 25 insertions, 50 deletions
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 4eef17c5c1da..11b0ff26b97b 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -58,14 +58,6 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
-/* Generic variants assume pgtable_t is struct page *, hence need for these */
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pgtable);
-
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 313e6f543d2d..28ed82b1800f 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -60,7 +60,7 @@ typedef struct {
#define __pgprot(x) ((pgprot_t) { (x) })
#define pte_pgprot(x) __pgprot(pte_val(x))
-typedef pte_t * pgtable_t;
+typedef struct page *pgtable_t;
/*
* Use virt_to_pfn with caution:
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 408bc4b0842d..8ab1af3da6e7 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -45,22 +45,17 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
set_pmd(pmd, __pmd((unsigned long)pte));
}
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
{
- set_pmd(pmd, __pmd((unsigned long)pte));
-}
-
-static inline int __get_order_pgd(void)
-{
- return get_order(PTRS_PER_PGD * sizeof(pgd_t));
+ set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- int num, num2;
- pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+ pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
if (ret) {
+ int num, num2;
num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
memzero(ret, num * sizeof(pgd_t));
@@ -76,61 +71,43 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, __get_order_pgd());
-}
-
-
-/*
- * With software-only page-tables, addr-split for traversal is tweakable and
- * that directly governs how big tables would be at each level.
- * Further, the MMU page size is configurable.
- * Thus we need to programatically assert the size constraint
- * All of this is const math, allowing gcc to do constant folding/propagation.
- */
-
-static inline int __get_order_pte(void)
-{
- return get_order(PTRS_PER_PTE * sizeof(pte_t));
+ free_page((unsigned long)pgd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- __get_order_pte());
+ pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
return pte;
}
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- pgtable_t pte_pg;
struct page *page;
- pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
- if (!pte_pg)
- return 0;
- memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
- page = virt_to_page(pte_pg);
+ page = (pgtable_t)alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
+ if (!page)
+ return NULL;
+
if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
- return 0;
+ return NULL;
}
- return pte_pg;
+ return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+ free_page((unsigned long)pte);
}
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page)
{
- pgtable_pte_page_dtor(virt_to_page(ptep));
- free_pages((unsigned long)ptep, __get_order_pte());
+ pgtable_pte_page_dtor(pte_page);
+ __free_page(pte_page);
}
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
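Taken together, the pgalloc.h hooks above are consumed by generic mm code roughly as in the sketch below. This is illustrative only; the real caller is __pte_alloc() in mm/memory.c, which additionally takes the page-table lock and re-checks the pmd, and the function name here is hypothetical:

/* Illustrative only: how a user pte table gets allocated and wired up */
static int example_alloc_user_pte(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);	/* now a struct page *, ctor'ed */

	if (!new)
		return -ENOMEM;

	/* pmd_populate() converts back to a kernel address internally */
	pmd_populate(mm, pmd, new);
	return 0;
}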
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
index e65fb8c9da12..561dedab79ed 100644
--- a/arch/arc/include/asm/pgtable-levels.h
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -35,9 +35,15 @@
#else
/*
* No Super page case
- * Default value provides 11:8:13 (8K), 11:9:12 (4K)
+ * Default value provides 11:8:13 (8K), 10:10:12 (4K)
+ * Limits imposed by pgtable_t only PAGE_SIZE long
+ * (so 4K page can only have 1K entries: or 10 bits)
*/
+#ifdef CONFIG_ARC_PAGE_SIZE_4K
+#define PGDIR_SHIFT 22
+#else
#define PGDIR_SHIFT 21
+#endif
#endif
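Why the 4K config moves to PGDIR_SHIFT 22: with pgd_alloc() and pte_alloc_one() now handing out exactly one page per table, each level's index is capped by how many entries fit in a page. Assuming 4-byte table entries (i.e. no PAE40), a 4K page holds 1024 entries, i.e. 10 bits per level, giving the 10:10:12 split; the 8K config keeps 11:8:13 because an 11-bit pgd (2048 x 4 bytes = 8K) still fits in one 8K page. A hypothetical compile-time check, not part of this patch, expressing that constraint:

/* Hypothetical sanity check (not in the patch): each table must fit in
 * the single page that now backs it.
 */
static inline void check_pgtable_geometry(void)
{
	BUILD_BUG_ON(PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
	BUILD_BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) > PAGE_SIZE);
}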