author    Juergen Gross <jgross@suse.com>  2017-10-27 19:49:37 +0200
committer Boris Ostrovsky <boris.ostrovsky@oracle.com>  2017-10-31 09:06:48 -0400
commit    6f0e8bf16730a36ff6773802d8c8df56d10e60cd (patch)
tree      331fcc02308d71fb979e421a0d4c9508f1cc5e0c /arch/x86/include/asm/xen
parent    xen: introduce a Kconfig option to enable the pvcalls frontend (diff)
xen: support 52 bit physical addresses in pv guests
Physical addresses on processors supporting 5 level paging can be up to 52 bits wide. For a Xen pv guest running on such a machine, those physical addresses have to be supported in order to be able to use any memory on the machine, even if the guest itself does not support 5 level paging.

So when reading/writing an MFN from/to a pte, don't use the kernel's PTE_PFN_MASK but a new XEN_PTE_MFN_MASK allowing full 40 bit wide MFNs.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
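A minimal standalone sketch (not part of the patch, and not the kernel code itself) of the mask arithmetic the message describes: with 4 KiB pages and 52 bit wide physical addresses, the MFN field of a pte spans bits 12..51, i.e. 40 bits. The names XEN_PHYS_MASK and the example pte value below are illustrative stand-ins, and the SME clearing done by __sme_clr in the real macro is omitted.

    /* Illustration of the 40 bit MFN mask derived from a 52 bit physical mask. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT        12
    #define PAGE_MASK         (~((1UL << PAGE_SHIFT) - 1))
    #define XEN_PHYS_MASK     ((1UL << 52) - 1)           /* bits 0..51 */
    #define XEN_PTE_MFN_MASK  ((uint64_t)((long)PAGE_MASK & XEN_PHYS_MASK))

    int main(void)
    {
        /* Example pte: maximum 40 bit MFN plus some low flag bits. */
        uint64_t pte = 0x000ffffffffff063ULL;

        printf("XEN_PTE_MFN_MASK = %#018lx\n", (unsigned long)XEN_PTE_MFN_MASK);
        printf("mfn              = %#lx\n",
               (unsigned long)((pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT));
        return 0;
    }

This prints a mask of 0x000ffffffffff000 (bits 12..51) and an MFN of 0xffffffffff, whereas the kernel's PTE_PFN_MASK would truncate MFNs above the guest's own MAXPHYADDR.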
Diffstat (limited to 'arch/x86/include/asm/xen')
-rw-r--r--  arch/x86/include/asm/xen/page.h | 11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 07b6531813c4..90e91003fd9d 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -26,6 +26,15 @@ typedef struct xpaddr {
phys_addr_t paddr;
} xpaddr_t;
+#ifdef CONFIG_X86_64
+#define XEN_PHYSICAL_MASK __sme_clr((1UL << 52) - 1)
+#else
+#define XEN_PHYSICAL_MASK __PHYSICAL_MASK
+#endif
+
+#define XEN_PTE_MFN_MASK ((pteval_t)(((signed long)PAGE_MASK) & \
+ XEN_PHYSICAL_MASK))
+
#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
@@ -277,7 +286,7 @@ static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
static inline unsigned long pte_mfn(pte_t pte)
{
- return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+ return (pte.pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
}
static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)