Diffstat (limited to 'arch/x86/mm/kasan_init_64.c')
 arch/x86/mm/kasan_init_64.c | 55 +++++++++++++++++++++++++++++--------------------------
 1 file changed, 29 insertions(+), 26 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 04a9cf6b034f..462fde83b515 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -211,7 +211,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+		pgd_entry = __pgd(_KERNPG_TABLE |
+				  __pa_nodebug(kasan_early_shadow_p4d));
 		set_pgd(pgd, pgd_entry);
 	}
@@ -222,7 +223,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
 		if (!p4d_none(*p4d))
 			continue;
 
-		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+		p4d_entry = __p4d(_KERNPG_TABLE |
+				  __pa_nodebug(kasan_early_shadow_pud));
 		set_p4d(p4d, p4d_entry);
 	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }
@@ -261,10 +263,11 @@ static struct notifier_block kasan_die_notifier = {
 void __init kasan_early_init(void)
 {
 	int i;
-	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
-	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
-	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
-	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
+	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
+				__PAGE_KERNEL | _PAGE_ENC;
+	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
+	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
+	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;
 
 	/* Mask out unsupported __PAGE_KERNEL bits: */
 	pte_val &= __default_kernel_pte_mask;
@@ -273,16 +276,16 @@ void __init kasan_early_init(void)
 	p4d_val &= __default_kernel_pte_mask;
 
 	for (i = 0; i < PTRS_PER_PTE; i++)
-		kasan_zero_pte[i] = __pte(pte_val);
+		kasan_early_shadow_pte[i] = __pte(pte_val);
 
 	for (i = 0; i < PTRS_PER_PMD; i++)
-		kasan_zero_pmd[i] = __pmd(pmd_val);
+		kasan_early_shadow_pmd[i] = __pmd(pmd_val);
 
 	for (i = 0; i < PTRS_PER_PUD; i++)
-		kasan_zero_pud[i] = __pud(pud_val);
+		kasan_early_shadow_pud[i] = __pud(pud_val);
 
 	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
-		kasan_zero_p4d[i] = __p4d(p4d_val);
+		kasan_early_shadow_p4d[i] = __p4d(p4d_val);
 
 	kasan_map_early_shadow(early_top_pgt);
 	kasan_map_early_shadow(init_top_pgt);
@@ -326,7 +329,7 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
 
-	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
+	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
 			kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
 	for (i = 0; i < E820_MAX_ENTRIES; i++) {
@@ -338,41 +341,41 @@ void __init kasan_init(void)
 	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
 	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
-	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
-						PAGE_SIZE);
+	shadow_cpu_entry_begin = (void *)round_down(
+			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);
 
 	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
 					CPU_ENTRY_AREA_MAP_SIZE);
 	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
-	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
-					PAGE_SIZE);
+	shadow_cpu_entry_end = (void *)round_up(
+			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
 
-	kasan_populate_zero_shadow(
+	kasan_populate_early_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		shadow_cpu_entry_begin);
 
 	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
 			      (unsigned long)shadow_cpu_entry_end, 0);
 
-	kasan_populate_zero_shadow(shadow_cpu_entry_end,
-			kasan_mem_to_shadow((void *)__START_KERNEL_map));
+	kasan_populate_early_shadow(shadow_cpu_entry_end,
+			kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
 	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
 			      (unsigned long)kasan_mem_to_shadow(_end),
 			      early_pfn_to_nid(__pa(_stext)));
 
-	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-			(void *)KASAN_SHADOW_END);
+	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+			(void *)KASAN_SHADOW_END);
 
 	load_cr3(init_top_pgt);
 	__flush_tlb_all();
 
 	/*
-	 * kasan_zero_page has been used as early shadow memory, thus it may
-	 * contain some garbage. Now we can clear and write protect it, since
-	 * after the TLB flush no one should write to it.
+	 * kasan_early_shadow_page has been used as early shadow memory, thus
+	 * it may contain some garbage. Now we can clear and write protect it,
+	 * since after the TLB flush no one should write to it.
 	 */
-	memset(kasan_zero_page, 0, PAGE_SIZE);
+	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
 
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		pte_t pte;
 		pgprot_t prot;
@@ -380,8 +383,8 @@ void __init kasan_init(void)
 		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
 		pgprot_val(prot) &= __default_kernel_pte_mask;
 
-		pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
-		set_pte(&kasan_zero_pte[i], pte);
+		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
+		set_pte(&kasan_early_shadow_pte[i], pte);
 	}
 
 	/* Flush TLBs again to be sure that write protection applied. */
 	__flush_tlb_all();
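
The idea this patch renames around: one physical page (kasan_early_shadow_page) is mapped read-only behind every shadow address that no real shadow memory backs, so arbitrarily large "not poisoned" regions cost a single page frame. Below is a rough userspace sketch of that sharing trick. It is not kernel code and not part of the patch; memfd_create(2), the 16-page range, and the error handling are illustrative assumptions.

/*
 * Userspace sketch only: emulate backing a large read-only "shadow"
 * range with a single shared physical page, the way the early KASAN
 * shadow does. Build with: cc -o shadow shadow.c (Linux, glibc >= 2.27)
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	enum { N = 16 };	/* virtual pages sharing one page frame */

	/* One page of backing store; memfd pages start out zeroed. */
	int fd = memfd_create("early_shadow", 0);
	if (fd < 0 || ftruncate(fd, page) < 0) {
		perror("memfd_create/ftruncate");
		return 1;
	}

	/* Reserve a contiguous range, then map the same page N times. */
	char *shadow = mmap(NULL, N * page, PROT_NONE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (shadow == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	for (int i = 0; i < N; i++)
		if (mmap(shadow + i * page, page, PROT_READ,
			 MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED) {
			perror("mmap MAP_FIXED");
			return 1;
		}

	/* Every page reads as zero ("not poisoned"); a write would
	 * fault, mirroring the write protection applied after the
	 * TLB flush in the last hunk above. */
	printf("first=%d last=%d\n", shadow[0], shadow[(N - 1) * page]);
	close(fd);
	return 0;
}

In the kernel the sharing happens at the page-table level instead of via mmap: every kasan_early_shadow_pte entry points at the same kasan_early_shadow_page, which the final hunk clears with memset() and remaps with __PAGE_KERNEL_RO once nothing may write to it anymore.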