author     Zong Li <zong.li@sifive.com>                     2020-02-07 17:52:44 +0800
committer  Palmer Dabbelt <palmerdabbelt@google.com>        2020-02-24 13:12:49 -0800
commit     a0a31fd84f8f66828790d860545d4167777d58c6 (patch)
tree       6cdd0438bd890d614a043891de86290fcd3efb3f /arch
parent     riscv: Fix gitignore (diff)
riscv: allocate a complete page size for each page table
Each page table should be created by allocating a complete page for it. Otherwise, the contents of a page table can be corrupted: the memory allocator may later hand out the middle of that page table's page for other uses.

Signed-off-by: Zong Li <zong.li@sifive.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
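For illustration only, a minimal userspace sketch of the sizing arithmetic introduced by this patch, using assumed Sv39 constants (4 KiB pages, 512 entries per table) and unsigned long as a stand-in for pte_t/pmd_t; none of these values come from the patch itself:

#include <stdio.h>

/* Assumed Sv39 constants for illustration: 4 KiB pages, 512 entries per
 * table. pte_t/pmd_t are stood in by unsigned long (8 bytes on riscv64). */
#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	512UL
#define PTRS_PER_PMD	512UL

int main(void)
{
	/* Example region: 1 GiB of shadow memory to populate. */
	unsigned long n_pages = (1UL << 30) / PAGE_SIZE;	/* 262144 pages */

	/* Same rounding as the patch: a whole number of tables,
	 * each backed by a complete page. */
	unsigned long n_ptes =
		((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
	unsigned long n_pmds =
		((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;

	printf("n_pages=%lu n_ptes=%lu n_pmds=%lu\n", n_pages, n_ptes, n_pmds);
	printf("pte alloc=%lu bytes, pmd alloc=%lu bytes\n",
	       n_ptes * PTRS_PER_PTE * sizeof(unsigned long),
	       n_pmds * PTRS_PER_PMD * sizeof(unsigned long));
	return 0;
}

Note that the rounding adds a full PTRS_PER_PTE (rather than PTRS_PER_PTE - 1) before masking, so a page count that is already an exact multiple still gets one spare table; the sizing only ever errs on the side of allocating more.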
Diffstat (limited to 'arch')
-rw-r--r--  arch/riscv/mm/kasan_init.c | 27
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index f0cc86040587..f8eaf7e73a23 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -46,29 +46,34 @@ asmlinkage void __init kasan_early_init(void)
 
 static void __init populate(void *start, void *end)
 {
-	unsigned long i;
+	unsigned long i, offset;
 	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
 	unsigned long vend = PAGE_ALIGN((unsigned long)end);
 	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+	unsigned long n_ptes =
+		((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
 	unsigned long n_pmds =
-		(n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
-						n_pages / PTRS_PER_PTE;
+		((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
+
+	pte_t *pte =
+		memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+	pmd_t *pmd =
+		memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
 	pgd_t *pgd = pgd_offset_k(vaddr);
-	pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
-	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
 
 	for (i = 0; i < n_pages; i++) {
 		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
-
-		set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+		set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
 	}
 
-	for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
-		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+	for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
+		set_pmd(&pmd[i],
+			pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
 				__pgprot(_PAGE_TABLE)));
 
-	for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
-		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+	for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
+		set_pgd(&pgd[i],
+			pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
 				__pgprot(_PAGE_TABLE)));
 
 	flush_tlb_all();
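To see why the old sizing was fragile, consider a region whose page count is not a multiple of PTRS_PER_PTE. The sketch below (same assumed constants as above, 64-bit host, not values taken from this commit) contrasts the old and new PTE allocation requests: the old request ends partway through the last table's page, so that table's trailing entry slots can later be handed out for other uses even though the hardware walker still reads all 512 entries, while the new request always covers whole, page-sized tables.

#include <stdio.h>

/* Assumed Sv39 constant for illustration. */
#define PTRS_PER_PTE	512UL

int main(void)
{
	/* A region of 700 pages: not a whole number of PTE tables. */
	unsigned long n_pages = 700;

	/* Old request: 700 slots of 8 bytes = 5600 bytes, i.e. one full page
	 * plus a partial second page; the last table's remaining slots lie
	 * outside the allocation. */
	unsigned long old_bytes = n_pages * sizeof(unsigned long);

	/* New request: two complete tables, each occupying a full page. */
	unsigned long n_ptes =
		((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
	unsigned long new_bytes = n_ptes * PTRS_PER_PTE * sizeof(unsigned long);

	printf("old pte alloc: %lu bytes, new pte alloc: %lu bytes (%lu tables)\n",
	       old_bytes, new_bytes, n_ptes);
	return 0;
}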