author     Dan Williams <dan.j.williams@intel.com>  2018-07-13 21:50:27 -0700
committer  Dave Jiang <dave.jiang@intel.com>  2018-08-20 09:22:45 -0700
commit     510ee090abc3dbc862bd35f5f3d8b7284a9117b4 (patch)
tree       d2df0a6b7bc544752629f55255c8823dc3884d90 /arch/x86/mm/pat.c
parent     mm, memory_failure: Teach memory_failure() about dev_pagemap pages (diff)
x86/mm/pat: Prepare {reserve, free}_memtype() for "decoy" addresses
In preparation for using set_memory_uc() instead of set_memory_np() for isolating poison from speculation, teach the memtype code to sanitize physical addresses vs __PHYSICAL_MASK.

The motivation for using set_memory_uc() for this case is to allow ongoing access to persistent memory pages via the pmem-driver + memcpy_mcsafe() until the poison is repaired.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: <linux-edac@vger.kernel.org>
Cc: <x86@kernel.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--  arch/x86/mm/pat.c  16
1 file changed, 16 insertions, 0 deletions
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 1555bd7d3449..3d0c83ef6aab 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -512,6 +512,17 @@ static int free_ram_pages_type(u64 start, u64 end)
 	return 0;
 }
 
+static u64 sanitize_phys(u64 address)
+{
+	/*
+	 * When changing the memtype for pages containing poison allow
+	 * for a "decoy" virtual address (bit 63 clear) passed to
+	 * set_memory_X(). __pa() on a "decoy" address results in a
+	 * physical address with bit 63 set.
+	 */
+	return address & __PHYSICAL_MASK;
+}
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_MODE_WB
@@ -533,6 +544,8 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 	int is_range_ram;
 	int err = 0;
 
+	start = sanitize_phys(start);
+	end = sanitize_phys(end);
 	BUG_ON(start >= end); /* end is exclusive */
 
 	if (!pat_enabled()) {
@@ -609,6 +622,9 @@ int free_memtype(u64 start, u64 end)
 	if (!pat_enabled())
 		return 0;
 
+	start = sanitize_phys(start);
+	end = sanitize_phys(end);
+
 	/* Low ISA region is always mapped WB. No need to track */
 	if (x86_platform.is_untracked_pat_range(start, end))
 		return 0;
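
For readers unfamiliar with the "decoy" scheme this patch prepares for, below is a minimal standalone sketch of the address arithmetic. It is not kernel code: FAKE_PAGE_OFFSET, FAKE_PHYSICAL_MASK and fake_pa() are illustrative stand-ins (modeled loosely on x86_64) for the kernel's PAGE_OFFSET, __PHYSICAL_MASK and __pa(), whose real values depend on configuration. The sketch only demonstrates why clearing bit 63 of a direct-map virtual address yields a __pa() result with bit 63 set, and why masking against the physical-address mask, as the new sanitize_phys() does in the diff above, recovers the real physical address.

/*
 * Illustrative sketch only, not kernel code. The constants below are
 * assumptions (46-bit physical mask, a fixed direct-map base).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PHYSICAL_MASK ((1ULL << 46) - 1)      /* stand-in for __PHYSICAL_MASK */
#define FAKE_PAGE_OFFSET   0xffff888000000000ULL   /* stand-in direct-map base */

/* stand-in for __pa() on a direct-map address */
static uint64_t fake_pa(uint64_t vaddr)
{
	return vaddr - FAKE_PAGE_OFFSET;
}

/* the same masking step the patch adds as sanitize_phys() */
static uint64_t sanitize_phys(uint64_t address)
{
	return address & FAKE_PHYSICAL_MASK;
}

int main(void)
{
	uint64_t phys  = 0x1234000;                  /* physical page containing poison */
	uint64_t vaddr = FAKE_PAGE_OFFSET + phys;    /* its direct-map alias */
	uint64_t decoy = vaddr & ~(1ULL << 63);      /* "decoy": bit 63 cleared */

	uint64_t decoy_phys = fake_pa(decoy);        /* comes back with bit 63 set */

	printf("__pa(decoy)     = %#" PRIx64 "\n", decoy_phys);   /* 0x8000000001234000 */
	printf("sanitize_phys() = %#" PRIx64 "\n",
	       sanitize_phys(decoy_phys));                        /* 0x1234000 */
	return 0;
}

This masking is the property the reserve_memtype() and free_memtype() hunks rely on: even when invoked via set_memory_uc() on a decoy alias, the memtype tracking sees the real physical range.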