Diffstat (limited to 'arch/arm64/mm/pageattr.c')
 arch/arm64/mm/pageattr.c | 39 +++++++++++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 250c49008d73..5922178d7a06 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -8,7 +8,7 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
-#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
@@ -19,6 +19,17 @@ struct page_change_data {
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
+bool can_set_direct_map(void)
+{
+ /*
+ * rodata_full, DEBUG_PAGEALLOC and KFENCE require the linear map to
+ * be mapped at page granularity, so that it is possible to
+ * protect/unprotect single pages.
+ */
+ return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
+ IS_ENABLED(CONFIG_KFENCE);
+}
+
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
struct page_change_data *cdata = data;
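Review note: can_set_direct_map() centralizes the policy for when the arm64 linear (direct) map has to stay page-granular. A minimal sketch of how mapping code might consult it when deciding whether block/contiguous mappings are allowed; linear_map_flags() is a made-up helper for illustration, though NO_BLOCK_MAPPINGS and NO_CONT_MAPPINGS are flags the arm64 mmu code uses:

    /* Illustrative only: a block (huge) mapping cannot have a single
     * small page unmapped from it later, so page-granular protection
     * rules out block and contiguous mappings for the linear map. */
    static unsigned long linear_map_flags(void)
    {
            if (can_set_direct_map())
                    return NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
            return 0;
    }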
@@ -80,7 +91,7 @@ static int change_memory_common(unsigned long addr, int numpages,
*/
area = find_vm_area((void *)addr);
if (!area ||
- end > (unsigned long)area->addr + area->size ||
+ end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
!(area->flags & VM_ALLOC))
return -EINVAL;
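Review note: with hardware tag-based KASAN (MTE), pointers handed out by vmalloc carry a tag in their top byte, so area->addr may be tagged while addr and end are already untagged. Comparing the two directly can spuriously fail the bounds check. A hedged fragment showing the intent (start and fits are illustrative locals, not from the patch):

    /* Assumption: HW_TAGS KASAN is enabled, so the top byte of
     * area->addr holds a tag. kasan_reset_tag() returns the same
     * address with the tag cleared, letting the bounds check compare
     * plain virtual addresses. */
    unsigned long start = (unsigned long)kasan_reset_tag(area->addr);
    bool fits = end <= start + area->size;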
@@ -91,7 +102,8 @@ static int change_memory_common(unsigned long addr, int numpages,
* If we are manipulating read-only permissions, apply the same
* change to the linear mapping of the pages that back this VM area.
*/
- if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+ if (rodata_enabled &&
+ rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
pgprot_val(clear_mask) == PTE_RDONLY)) {
for (i = 0; i < area->nr_pages; i++) {
__change_memory_common((u64)page_address(area->pages[i]),
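Review note: the added rodata_enabled check matters because rodata_full defaults on via CONFIG_RODATA_FULL_DEFAULT_ENABLED even when rodata= is disabled on the command line, so both flags must agree before the linear alias is rewritten (mirroring the pair in can_set_direct_map() above). An illustrative caller of the path this hunk guards:

    /* Illustrative: write-protecting a vmalloc'd buffer. With
     * rodata_enabled && rodata_full, change_memory_common() also
     * write-protects each backing page's linear-map alias, closing
     * the writable-alias hole. */
    void *buf = vmalloc(PAGE_SIZE);
    if (buf)
            set_memory_ro((unsigned long)buf, 1);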
@@ -126,13 +138,13 @@ int set_memory_nx(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
__pgprot(PTE_PXN),
- __pgprot(0));
+ __pgprot(PTE_MAYBE_GP));
}
int set_memory_x(unsigned long addr, int numpages)
{
return change_memory_common(addr, numpages,
- __pgprot(0),
+ __pgprot(PTE_MAYBE_GP),
__pgprot(PTE_PXN));
}
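Review note: PTE_MAYBE_GP ties the executable-permission helpers to BTI. On kernels built with BTI support it expands to the Guarded Page bit, otherwise to zero, so set_memory_x() marks pages executable and BTI-guarded in one step while set_memory_nx() clears both. A sketch of the resulting round trip (addr is a placeholder for some page-aligned kernel address):

    set_memory_x(addr, 1);   /* clear PXN; set GP where BTI is built in */
    set_memory_nx(addr, 1);  /* set PXN; clear GP again */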
@@ -155,7 +167,7 @@ int set_direct_map_invalid_noflush(struct page *page)
.clear_mask = __pgprot(PTE_VALID),
};
- if (!rodata_full)
+ if (!can_set_direct_map())
return 0;
return apply_to_page_range(&init_mm,
@@ -170,7 +182,7 @@ int set_direct_map_default_noflush(struct page *page)
.clear_mask = __pgprot(PTE_RDONLY),
};
- if (!rodata_full)
+ if (!can_set_direct_map())
return 0;
return apply_to_page_range(&init_mm,
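Review note: both set_direct_map_*_noflush() helpers now bail out via can_set_direct_map() instead of rodata_full alone, so DEBUG_PAGEALLOC and KFENCE configurations get working direct-map manipulation too. When the linear map is block-mapped they return 0 and change nothing, which callers must treat as "nothing to do". A hedged round-trip sketch, including the TLB flush the _noflush suffix leaves to the caller:

    /* Illustrative: temporarily drop one page from the linear map. */
    struct page *page = alloc_page(GFP_KERNEL);
    unsigned long addr = (unsigned long)page_address(page);

    set_direct_map_invalid_noflush(page);            /* clears PTE_VALID */
    flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
    /* the page is now unreachable through the linear map */
    set_direct_map_default_noflush(page);            /* restores the RW mapping */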
@@ -178,13 +190,15 @@ int set_direct_map_default_noflush(struct page *page)
PAGE_SIZE, change_page_range, &data);
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
- if (!debug_pagealloc_enabled() && !rodata_full)
+ if (!can_set_direct_map())
return;
set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
/*
* This function is used to determine if a linear map page has been marked as
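Review note on the hunk above: wrapping __kernel_map_pages() in CONFIG_DEBUG_PAGEALLOC matches its generic caller, the page allocator's debug hook, which only exists in DEBUG_PAGEALLOC builds; other configurations reach the direct map through the set_direct_map_*() helpers instead. A simplified sketch of that generic hook (the exact form in include/linux/mm.h varies by kernel version):

    static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
    {
            if (debug_pagealloc_enabled_static())
                    __kernel_map_pages(page, numpages, 1);
    }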
@@ -198,19 +212,24 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
bool kernel_page_present(struct page *page)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
- if (!debug_pagealloc_enabled() && !rodata_full)
+ if (!can_set_direct_map())
return true;
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ if (p4d_none(READ_ONCE(*p4dp)))
+ return false;
+
+ pudp = pud_offset(p4dp, addr);
pud = READ_ONCE(*pudp);
if (pud_none(pud))
return false;
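Review note: kernel_page_present() now descends through the p4d level that the generic five-level page-table API inserts between pgd and pud. On arm64 the p4d is folded, so p4d_offset() is effectively a pass-through, but the explicit step keeps the walk valid against the generic helpers. The complete descent, for reference (the pmd and pte steps continue past the end of this hunk):

    /* Walk order after this change: pgd -> p4d -> pud -> pmd -> pte. */
    pgdp = pgd_offset_k(addr);
    p4dp = p4d_offset(pgdp, addr);
    pudp = pud_offset(p4dp, addr);
    pmdp = pmd_offset(pudp, addr);
    ptep = pte_offset_kernel(pmdp, addr);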