author	Heiko Stuebner <heiko@sntech.de>	2022-05-11 21:29:17 +0200
committer	Palmer Dabbelt <palmer@rivosinc.com>	2022-05-11 21:36:32 -0700
commit	100631b48ded73fcd8fdd7e17139cda92dfbfb79
tree	1cdb9547e66c18b542591bcce0795bd3fd714522	/arch/riscv/include/asm/pgtable.h
parent	riscv: move boot alternatives to after fill_hwcap
riscv: Fix accessing pfn bits in PTEs for non-32bit variants
On rv32 the PFN part of PTEs is defined to use bits [xlen-1:10], while on rv64 it is defined to use bits [53:10], leaving [63:54] as reserved.

With upcoming optional extensions like svpbmt, these previously reserved bits will get used, so simply right-shifting the PTE to get the PFN won't be enough. So introduce a _PAGE_PFN_MASK constant to mask the correct bits for both rv32 and rv64 before shifting.

Signed-off-by: Heiko Stuebner <heiko@sntech.de>
Reviewed-by: Philipp Tomsich <philipp.tomsich@vrull.eu>
Link: https://lore.kernel.org/r/20220511192921.2223629-9-heiko@sntech.de
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
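The mask itself is introduced elsewhere in this series rather than in this header, so as orientation only, here is a minimal sketch of per-XLEN definitions consistent with the bit ranges above, using the kernel's GENMASK() helper; the exact names, values and placement here are assumptions for illustration, not hunks from the patch:

/* Sketch only, not part of this patch: plausible per-XLEN definitions
 * of _PAGE_PFN_MASK matching the bit ranges described in the commit
 * message. The real definitions live elsewhere in this series.
 */
#ifdef CONFIG_64BIT
/* rv64: PFN occupies bits [53:10]; bits [63:54] stay reserved for extensions */
#define _PAGE_PFN_MASK	GENMASK(53, 10)
#else
/* rv32: PFN occupies bits [xlen-1:10], i.e. [31:10] */
#define _PAGE_PFN_MASK	GENMASK(31, 10)
#endif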
Diffstat (limited to 'arch/riscv/include/asm/pgtable.h')
-rw-r--r--	arch/riscv/include/asm/pgtable.h	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 046b44225623..faba543e2b08 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -108,6 +108,8 @@
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>
 
+#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
+
 #ifdef CONFIG_64BIT
 #include <asm/pgtable-64.h>
 #else
@@ -261,12 +263,12 @@ static inline unsigned long _pgd_pfn(pgd_t pgd)
 
 static inline struct page *pmd_page(pmd_t pmd)
 {
-	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
 }
 
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
-	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
 }
 
 static inline pte_t pmd_pte(pmd_t pmd)
@@ -282,7 +284,7 @@ static inline pte_t pud_pte(pud_t pud)
 /* Yields the page frame number (PFN) of a page table entry */
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
+	return __page_val_to_pfn(pte_val(pte));
 }
 
 #define pte_page(x) pfn_to_page(pte_pfn(x))
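For illustration only (not part of the patch), the standalone C sketch below shows the failure mode the mask prevents: with the rv64 layout described in the commit message, setting a bit in the formerly reserved [63:54] range (the particular bit chosen here is hypothetical) corrupts a PFN computed by a bare shift, while the masked helper still recovers it.

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PFN_SHIFT	10
#define _PAGE_PFN_MASK	0x003ffffffffffc00ULL	/* bits [53:10], rv64 layout */
#define __page_val_to_pfn(_val)	(((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

int main(void)
{
	/* PTE with PFN 0x80000, some low permission bits, and a
	 * hypothetical attribute bit in the formerly reserved range.
	 */
	uint64_t pte = (0x80000ULL << _PAGE_PFN_SHIFT) | (1ULL << 61) | 0xef;

	/* bare shift leaks bit 61 into the result: 0x8000000080000 */
	printf("shift only: %#llx\n",
	       (unsigned long long)(pte >> _PAGE_PFN_SHIFT));

	/* masking first recovers the real PFN: 0x80000 */
	printf("masked:     %#llx\n",
	       (unsigned long long)__page_val_to_pfn(pte));
	return 0;
}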