From 16ebf031e8ab73779a382c9f2b097891da6af923 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 3 Dec 2018 18:03:46 +0100
Subject: x86/mm/cpa: Add __cpa_addr() helper

The code to compute the virtual address of a cpa_data is duplicated;
introduce a helper before more copies happen.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.229119497@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

(limited to 'arch/x86/mm/pageattr.c')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a1bcde35db4c..6e6900ebea30 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -228,6 +228,23 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 
 #endif
 
+static unsigned long __cpa_addr(struct cpa_data *cpa, int idx)
+{
+        if (cpa->flags & CPA_PAGES_ARRAY) {
+                struct page *page = cpa->pages[idx];
+
+                if (unlikely(PageHighMem(page)))
+                        return 0;
+
+                return (unsigned long)page_address(page);
+        }
+
+        if (cpa->flags & CPA_ARRAY)
+                return cpa->vaddr[idx];
+
+        return *cpa->vaddr;
+}
+
 /*
  * Flushing functions
  */
@@ -1476,15 +1493,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
         unsigned int level;
         pte_t *kpte, old_pte;
 
-        if (cpa->flags & CPA_PAGES_ARRAY) {
-                struct page *page = cpa->pages[cpa->curpage];
-                if (unlikely(PageHighMem(page)))
-                        return 0;
-                address = (unsigned long)page_address(page);
-        } else if (cpa->flags & CPA_ARRAY)
-                address = cpa->vaddr[cpa->curpage];
-        else
-                address = *cpa->vaddr;
+        address = __cpa_addr(cpa, cpa->curpage);
 repeat:
         kpte = _lookup_address_cpa(cpa, address, &level);
         if (!kpte)
@@ -1565,16 +1574,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
          * No need to redo, when the primary call touched the direct
          * mapping already:
          */
-        if (cpa->flags & CPA_PAGES_ARRAY) {
-                struct page *page = cpa->pages[cpa->curpage];
-                if (unlikely(PageHighMem(page)))
-                        return 0;
-                vaddr = (unsigned long)page_address(page);
-        } else if (cpa->flags & CPA_ARRAY)
-                vaddr = cpa->vaddr[cpa->curpage];
-        else
-                vaddr = *cpa->vaddr;
-
+        vaddr = __cpa_addr(cpa, cpa->curpage);
         if (!(within(vaddr, PAGE_OFFSET,
                     PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
--
cgit v1.2.3-59-g8ed1b
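[ Editor's note: the helper above folds three addressing modes into one
  place. Below is a minimal userspace sketch of the same dispatch pattern;
  the struct, the flag value and PAGE_SIZE are simplified stand-ins for the
  kernel definitions, and the CPA_PAGES_ARRAY case is omitted because it
  needs page_address():

        #include <stdio.h>

        #define CPA_ARRAY 2
        #define PAGE_SIZE 4096UL

        struct cpa_data {
                unsigned long *vaddr;   /* base address, or array of addresses */
                unsigned int flags;
        };

        /* mirrors __cpa_addr(): pick the address for entry @idx */
        static unsigned long cpa_addr(struct cpa_data *cpa, unsigned long idx)
        {
                if (cpa->flags & CPA_ARRAY)
                        return cpa->vaddr[idx];   /* one address per entry */

                return *cpa->vaddr;               /* single linear range */
        }

        int main(void)
        {
                unsigned long base = 0xffff888000000000UL;
                unsigned long array[] = { 0x1000UL, 0x9000UL, 0x3000UL };
                struct cpa_data linear = { .vaddr = &base, .flags = 0 };
                struct cpa_data arr = { .vaddr = array, .flags = CPA_ARRAY };

                printf("linear:   %#lx\n", cpa_addr(&linear, 0));
                printf("array[1]: %#lx\n", cpa_addr(&arr, 1));
                return 0;
        }
]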
From 98bfc9b038cde1ce108f69a50720e394fe774cb7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 3 Dec 2018 18:03:47 +0100
Subject: x86/mm/cpa: Make cpa_data::vaddr invariant

Currently __change_page_attr_set_clr() will modify cpa->vaddr when
!(CPA_ARRAY | CPA_PAGES_ARRAY), whereas in the array cases it will
increment cpa->curpage.

Change __cpa_addr() such that its @idx argument also works in the
!array case and use cpa->curpage increments for all cases.

NOTE: since cpa_data::numpages is 'unsigned long', cpa_data::curpage
should be too.

NOTE: after this only cpa->numpages is still modified.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.295174892@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

(limited to 'arch/x86/mm/pageattr.c')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 6e6900ebea30..ce8af3f08628 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -35,11 +35,11 @@ struct cpa_data {
         pgprot_t        mask_set;
         pgprot_t        mask_clr;
         unsigned long   numpages;
-        int             flags;
+        unsigned long   curpage;
         unsigned long   pfn;
-        unsigned        force_split : 1,
+        unsigned int    flags;
+        unsigned int    force_split : 1,
                         force_static_prot : 1;
-        int             curpage;
         struct page     **pages;
 };
 
@@ -228,7 +228,7 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 
 #endif
 
-static unsigned long __cpa_addr(struct cpa_data *cpa, int idx)
+static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
 {
         if (cpa->flags & CPA_PAGES_ARRAY) {
                 struct page *page = cpa->pages[idx];
@@ -242,7 +242,7 @@ static unsigned long __cpa_addr(struct cpa_data *cpa, int idx)
         if (cpa->flags & CPA_ARRAY)
                 return cpa->vaddr[idx];
 
-        return *cpa->vaddr;
+        return *cpa->vaddr + idx * PAGE_SIZE;
 }
 
 /*
@@ -1581,6 +1581,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
                 alias_cpa = *cpa;
                 alias_cpa.vaddr = &laddr;
                 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+                alias_cpa.curpage = 0;
 
                 ret = __change_page_attr_set_clr(&alias_cpa, 0);
                 if (ret)
@@ -1600,6 +1601,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
                 alias_cpa = *cpa;
                 alias_cpa.vaddr = &temp_cpa_vaddr;
                 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+                alias_cpa.curpage = 0;
 
                 /*
                  * The high mapping range is imprecise, so ignore the
@@ -1648,11 +1650,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                  */
                 BUG_ON(cpa->numpages > numpages || !cpa->numpages);
                 numpages -= cpa->numpages;
-                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
-                        cpa->curpage++;
-                else
-                        *cpa->vaddr += cpa->numpages * PAGE_SIZE;
-
+                cpa->curpage += cpa->numpages;
         }
         return 0;
 }
--
cgit v1.2.3-59-g8ed1b
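[ Editor's note: the key line above is "return *cpa->vaddr + idx * PAGE_SIZE":
  with 4K pages, idx 3 on base 0xffff888000000000 yields 0xffff888000003000,
  so index arithmetic replaces the old in-place "*cpa->vaddr += numpages *
  PAGE_SIZE" advance. A standalone sketch (4K PAGE_SIZE and the addresses are
  illustrative assumptions) showing the two bookkeeping schemes stay in step:

        #include <assert.h>
        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        int main(void)
        {
                unsigned long base = 0xffff888000000000UL;
                unsigned long vaddr = base;     /* old: mutated in place */
                unsigned long curpage = 0;      /* new: only the index advances */

                for (int step = 0; step < 3; step++) {
                        unsigned long chunk = 1UL << step;      /* 1, 2, 4 pages */

                        /* invariant base + index always equals the mutated address */
                        assert(vaddr == base + curpage * PAGE_SIZE);
                        vaddr += chunk * PAGE_SIZE;             /* old scheme */
                        curpage += chunk;                       /* new scheme */
                }
                printf("schemes agree across %lu pages\n", curpage);
                return 0;
        }
]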
From 5fe26b7a8f4693d532c7a3c3632e47e7d7016238 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 3 Dec 2018 18:03:48 +0100
Subject: x86/mm/cpa: Simplify the code after making cpa->vaddr invariant

Since cpa->vaddr is invariant, we can remove all the workarounds that
deal with it changing.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.366619025@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr-test.c |  7 ++-----
 arch/x86/mm/pageattr.c      | 13 ++++---------
 2 files changed, 6 insertions(+), 14 deletions(-)

(limited to 'arch/x86/mm/pageattr.c')

diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index b6b6468530f1..facce271e8b9 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -124,7 +124,6 @@ static int pageattr_test(void)
         unsigned int level;
         int i, k;
         int err;
-        unsigned long test_addr;
 
         if (print)
                 printk(KERN_INFO "CPA self-test:\n");
@@ -181,8 +180,7 @@ static int pageattr_test(void)
 
                 switch (i % 3) {
                 case 0:
-                        test_addr = addr[i];
-                        err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
+                        err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
                         break;
 
                 case 1:
@@ -226,8 +224,7 @@ static int pageattr_test(void)
                         failed++;
                         continue;
                 }
-                test_addr = addr[i];
-                err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
+                err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
                 if (err < 0) {
                         printk(KERN_ERR "CPA reverting failed: %d\n", err);
                         failed++;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index ce8af3f08628..afa98b7b6050 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1908,15 +1908,13 @@ EXPORT_SYMBOL_GPL(set_memory_array_wt);
 int _set_memory_wc(unsigned long addr, int numpages)
 {
         int ret;
-        unsigned long addr_copy = addr;
 
         ret = change_page_attr_set(&addr, numpages,
                                    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
                                    0);
         if (!ret) {
-                ret = change_page_attr_set_clr(&addr_copy, numpages,
-                                               cachemode2pgprot(
-                                                _PAGE_CACHE_MODE_WC),
+                ret = change_page_attr_set_clr(&addr, numpages,
+                                               cachemode2pgprot(_PAGE_CACHE_MODE_WC),
                                                __pgprot(_PAGE_CACHE_MASK),
                                                0, 0, NULL);
         }
@@ -2064,7 +2062,6 @@ int set_memory_global(unsigned long addr, int numpages)
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 {
         struct cpa_data cpa;
-        unsigned long start;
         int ret;
 
         /* Nothing to do if memory encryption is not active */
@@ -2075,8 +2072,6 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
         if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
                 addr &= PAGE_MASK;
 
-        start = addr;
-
         memset(&cpa, 0, sizeof(cpa));
         cpa.vaddr = &addr;
         cpa.numpages = numpages;
@@ -2091,7 +2086,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
         /*
          * Before changing the encryption attribute, we need to flush caches.
          */
-        cpa_flush_range(start, numpages, 1);
+        cpa_flush_range(addr, numpages, 1);
 
         ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2102,7 +2097,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
          * in case TLB flushing gets optimized in the cpa_flush_range()
          * path use the same logic as above.
          */
-        cpa_flush_range(start, numpages, 0);
+        cpa_flush_range(addr, numpages, 0);
 
         return ret;
 }
--
cgit v1.2.3-59-g8ed1b
From 935f5839827ef54b53406e80906f7c355eb73c1b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 3 Dec 2018 18:03:49 +0100
Subject: x86/mm/cpa: Optimize cpa_flush_array() TLB invalidation

Instead of punting and doing flush_tlb_all(), do the same as
flush_tlb_kernel_range() does and use single page invalidations.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.430001980@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/mm_internal.h |  2 ++
 arch/x86/mm/pageattr.c    | 42 ++++++++++++++++++++++++------------------
 arch/x86/mm/tlb.c         |  4 +++-
 3 files changed, 29 insertions(+), 19 deletions(-)

(limited to 'arch/x86/mm/pageattr.c')

diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 4e1f6e1b8159..319bde386d5f 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -19,4 +19,6 @@ extern int after_bootmem;
 
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
 
+extern unsigned long tlb_single_page_flush_ceiling;
+
 #endif  /* __X86_MM_INTERNAL_H */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index afa98b7b6050..351874259a71 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,6 +26,8 @@
 #include <asm/pat.h>
 #include <asm/set_memory.h>
 
+#include "mm_internal.h"
+
 /*
  * The current flushing context - we pass it instead of 5 arguments:
  */
@@ -346,16 +348,26 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
         }
 }
 
-static void cpa_flush_array(unsigned long baddr, unsigned long *start,
-                            int numpages, int cache,
-                            int in_flags, struct page **pages)
+void __cpa_flush_array(void *data)
 {
-        unsigned int i, level;
+        struct cpa_data *cpa = data;
+        unsigned int i;
 
-        if (__inv_flush_all(cache))
+        for (i = 0; i < cpa->numpages; i++)
+                __flush_tlb_one_kernel(__cpa_addr(cpa, i));
+}
+
+static void cpa_flush_array(struct cpa_data *cpa, int cache)
+{
+        unsigned int i;
+
+        if (cpa_check_flush_all(cache))
                 return;
 
-        flush_tlb_all();
+        if (cpa->numpages <= tlb_single_page_flush_ceiling)
+                on_each_cpu(__cpa_flush_array, cpa, 1);
+        else
+                flush_tlb_all();
 
         if (!cache)
                 return;
@@ -366,15 +378,11 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
          * will cause all other CPUs to flush the same
          * cachelines:
          */
-        for (i = 0; i < numpages; i++) {
-                unsigned long addr;
+        for (i = 0; i < cpa->numpages; i++) {
+                unsigned long addr = __cpa_addr(cpa, i);
+                unsigned int level;
                 pte_t *pte;
 
-                if (in_flags & CPA_PAGES_ARRAY)
-                        addr = (unsigned long)page_address(pages[i]);
-                else
-                        addr = start[i];
-
                 pte = lookup_address(addr, &level);
 
                 /*
@@ -1771,12 +1779,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                 goto out;
         }
 
-        if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
-                cpa_flush_array(baddr, addr, numpages, cache,
-                                cpa.flags, pages);
-        } else {
+        if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+                cpa_flush_array(&cpa, cache);
+        else
                 cpa_flush_range(baddr, numpages, cache);
-        }
 
 out:
         return ret;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 03b6b4c2238d..999d6d8f0bef 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -15,6 +15,8 @@
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
 
+#include "mm_internal.h"
+
 /*
  * TLB flushing, formerly SMP-only
  *              c/o Linus Torvalds.
@@ -721,7 +723,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
  *
  * This is in units of pages.
  */
-static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                 unsigned long end, unsigned int stride_shift,
--
cgit v1.2.3-59-g8ed1b
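[ Editor's note: the heuristic above is the one flush_tlb_kernel_range()
  already uses: below tlb_single_page_flush_ceiling (33 by default), per-page
  invalidation beats a full TLB flush. A userspace sketch of just that
  decision, with printf standing in for __flush_tlb_one_kernel() and
  flush_tlb_all(); the base address is an illustrative assumption:

        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        static unsigned long flush_ceiling = 33;        /* kernel default */

        static void flush_pages(unsigned long base, unsigned long numpages)
        {
                if (numpages <= flush_ceiling) {
                        /* cheap enough: invalidate each page individually */
                        for (unsigned long i = 0; i < numpages; i++)
                                printf("  invlpg %#lx\n", base + i * PAGE_SIZE);
                } else {
                        /* too many pages: nuke the whole TLB instead */
                        printf("  full TLB flush\n");
                }
        }

        int main(void)
        {
                puts("4 pages:");
                flush_pages(0xffffc90000000000UL, 4);
                puts("100 pages:");
                flush_pages(0xffffc90000000000UL, 100);
                return 0;
        }
]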
From 83b4e39146aa70913580966e0f2b78b7c3492760 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 3 Dec 2018 18:03:50 +0100
Subject: x86/mm/cpa: Make cpa_data::numpages invariant

Make sure __change_page_attr_set_clr() doesn't modify cpa->numpages.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.493000228@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

(limited to 'arch/x86/mm/pageattr.c')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 351874259a71..12b69263e501 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1625,14 +1625,15 @@ static int cpa_process_alias(struct cpa_data *cpa)
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
         unsigned long numpages = cpa->numpages;
-        int ret;
+        unsigned long rempages = numpages;
+        int ret = 0;
 
-        while (numpages) {
+        while (rempages) {
                 /*
                  * Store the remaining nr of pages for the large page
                  * preservation check.
                  */
-                cpa->numpages = numpages;
+                cpa->numpages = rempages;
                 /* for array changes, we can't use large page */
                 if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                         cpa->numpages = 1;
@@ -1643,12 +1644,12 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                 if (!debug_pagealloc_enabled())
                         spin_unlock(&cpa_lock);
                 if (ret)
-                        return ret;
+                        goto out;
 
                 if (checkalias) {
                         ret = cpa_process_alias(cpa);
                         if (ret)
-                                return ret;
+                                goto out;
                 }
 
                 /*
@@ -1656,11 +1657,15 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                  * CPA operation. Either a large page has been
                  * preserved or a single page update happened.
                  */
-                BUG_ON(cpa->numpages > numpages || !cpa->numpages);
-                numpages -= cpa->numpages;
+                BUG_ON(cpa->numpages > rempages || !cpa->numpages);
+                rempages -= cpa->numpages;
                 cpa->curpage += cpa->numpages;
         }
-        return 0;
+
+out:
+        /* Restore the original numpages */
+        cpa->numpages = numpages;
+        return ret;
 }
--
cgit v1.2.3-59-g8ed1b
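[ Editor's note: the shape of this change is the classic "save, scribble,
  restore on every exit path" pattern; cpa->numpages becomes scratch space
  inside the loop but is invariant to the caller. A hypothetical stripped-down
  version, with process_chunk() standing in for __change_page_attr():

        #include <stdio.h>

        struct ctx {
                unsigned long numpages;   /* caller's request, must survive */
        };

        /* stand-in: pretend one page got processed per call */
        static int process_chunk(struct ctx *c)
        {
                c->numpages = 1;          /* reports how much was done */
                return 0;
        }

        static int process_all(struct ctx *c)
        {
                unsigned long numpages = c->numpages;   /* save */
                unsigned long rempages = numpages;
                int ret = 0;

                while (rempages) {
                        c->numpages = rempages;         /* clobbered as scratch */
                        ret = process_chunk(c);
                        if (ret)
                                goto out;
                        rempages -= c->numpages;
                }
        out:
                c->numpages = numpages;   /* restore on all exit paths */
                return ret;
        }

        int main(void)
        {
                struct ctx c = { .numpages = 3 };
                printf("ret=%d, numpages still %lu\n",
                       process_all(&c), c.numpages);
                return 0;
        }
]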
From fe0937b24ff5d7b343b9922201e469f9a6009d9d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 3 Dec 2018 18:03:51 +0100
Subject: x86/mm/cpa: Fold cpa_flush_range() and cpa_flush_array() into a single cpa_flush() function

Note that the cache flush loop in cpa_flush_*() is identical when we
use __cpa_addr(); further observe that flush_tlb_kernel_range() is a
special case of the cpa_flush_array() TLB invalidation code.

This then means the two functions are virtually identical. Fold these
two functions into a single cpa_flush() call.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.559855600@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 92 ++++++++++----------------------------------------
 1 file changed, 18 insertions(+), 74 deletions(-)

(limited to 'arch/x86/mm/pageattr.c')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 12b69263e501..85ef53b86fa0 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -304,51 +304,7 @@ static void cpa_flush_all(unsigned long cache)
         on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static bool __inv_flush_all(int cache)
-{
-        BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
-
-        if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
-                cpa_flush_all(cache);
-                return true;
-        }
-
-        return false;
-}
-
-static void cpa_flush_range(unsigned long start, int numpages, int cache)
-{
-        unsigned int i, level;
-        unsigned long addr;
-
-        WARN_ON(PAGE_ALIGN(start) != start);
-
-        if (__inv_flush_all(cache))
-                return;
-
-        flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
-
-        if (!cache)
-                return;
-
-        /*
-         * We only need to flush on one CPU,
-         * clflush is a MESI-coherent instruction that
-         * will cause all other CPUs to flush the same
-         * cachelines:
-         */
-        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
-                pte_t *pte = lookup_address(addr, &level);
-
-                /*
-                 * Only flush present addresses:
-                 */
-                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-                        clflush_cache_range((void *) addr, PAGE_SIZE);
-        }
-}
-
-void __cpa_flush_array(void *data)
+void __cpa_flush_tlb(void *data)
 {
         struct cpa_data *cpa = data;
         unsigned int i;
@@ -357,33 +313,31 @@ void __cpa_flush_array(void *data)
                 __flush_tlb_one_kernel(__cpa_addr(cpa, i));
 }
 
-static void cpa_flush_array(struct cpa_data *cpa, int cache)
+static void cpa_flush(struct cpa_data *data, int cache)
 {
+        struct cpa_data *cpa = data;
         unsigned int i;
 
-        if (cpa_check_flush_all(cache))
+        BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
+
+        if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+                cpa_flush_all(cache);
                 return;
+        }
 
         if (cpa->numpages <= tlb_single_page_flush_ceiling)
-                on_each_cpu(__cpa_flush_array, cpa, 1);
+                on_each_cpu(__cpa_flush_tlb, cpa, 1);
         else
                 flush_tlb_all();
 
         if (!cache)
                 return;
 
-        /*
-         * We only need to flush on one CPU,
-         * clflush is a MESI-coherent instruction that
-         * will cause all other CPUs to flush the same
-         * cachelines:
-         */
         for (i = 0; i < cpa->numpages; i++) {
                 unsigned long addr = __cpa_addr(cpa, i);
                 unsigned int level;
-                pte_t *pte;
 
-                pte = lookup_address(addr, &level);
+                pte_t *pte = lookup_address(addr, &level);
 
                 /*
                  * Only flush present addresses:
@@ -1698,7 +1652,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 {
         struct cpa_data cpa;
         int ret, cache, checkalias;
-        unsigned long baddr = 0;
 
         memset(&cpa, 0, sizeof(cpa));
 
@@ -1732,11 +1685,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                          */
                         WARN_ON_ONCE(1);
                 }
-                /*
-                 * Save address for cache flush. *addr is modified in the call
-                 * to __change_page_attr_set_clr() below.
-                 */
-                baddr = make_addr_canonical_again(*addr);
         }
 
         /* Must avoid aliasing mappings in the highmem code */
@@ -1784,11 +1732,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                 goto out;
         }
 
-        if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
-                cpa_flush_array(&cpa, cache);
-        else
-                cpa_flush_range(baddr, numpages, cache);
-
+        cpa_flush(&cpa, cache);
 out:
         return ret;
 }
@@ -2097,18 +2041,18 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
         /*
          * Before changing the encryption attribute, we need to flush caches.
          */
-        cpa_flush_range(addr, numpages, 1);
+        cpa_flush(&cpa, 1);
 
         ret = __change_page_attr_set_clr(&cpa, 1);
 
         /*
-         * After changing the encryption attribute, we need to flush TLBs
-         * again in case any speculative TLB caching occurred (but no need
-         * to flush caches again). We could just use cpa_flush_all(), but
-         * in case TLB flushing gets optimized in the cpa_flush_range()
-         * path use the same logic as above.
+         * After changing the encryption attribute, we need to flush TLBs again
+         * in case any speculative TLB caching occurred (but no need to flush
+         * caches again). We could just use cpa_flush_all(), but in case TLB
+         * flushing gets optimized in the cpa_flush() path use the same logic
+         * as above.
          */
-        cpa_flush_range(addr, numpages, 0);
+        cpa_flush(&cpa, 0);
 
         return ret;
 }
--
cgit v1.2.3-59-g8ed1b
From c38116bb940ae37f51fccd315b420ee5961dcb76 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 3 Dec 2018 18:03:52 +0100
Subject: x86/mm/cpa: Better use CLFLUSHOPT

Currently we issue an MFENCE before and after flushing a range. This
means that if we flush a bunch of single page ranges -- like with the
cpa array, we issue a whole bunch of superfluous MFENCEs.

Reorganize the code a little to avoid this.

[ mingo: capitalize instructions, tweak changelog and comments. ]

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.626999883@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

(limited to 'arch/x86/mm/pageattr.c')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 85ef53b86fa0..7d05149995dc 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -251,15 +251,7 @@ static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
  * Flushing functions
  */
 
-/**
- * clflush_cache_range - flush a cache range with clflush
- * @vaddr:      virtual start address
- * @size:       number of bytes to flush
- *
- * clflushopt is an unordered instruction which needs fencing with mfence or
- * sfence to avoid ordering issues.
- */
-void clflush_cache_range(void *vaddr, unsigned int size)
+static void clflush_cache_range_opt(void *vaddr, unsigned int size)
 {
         const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
         void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
@@ -268,11 +260,22 @@ void clflush_cache_range(void *vaddr, unsigned int size)
         if (p >= vend)
                 return;
 
-        mb();
-
         for (; p < vend; p += clflush_size)
                 clflushopt(p);
+}
 
+/**
+ * clflush_cache_range - flush a cache range with clflush
+ * @vaddr:      virtual start address
+ * @size:       number of bytes to flush
+ *
+ * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
+ * SFENCE to avoid ordering issues.
+ */
+void clflush_cache_range(void *vaddr, unsigned int size)
+{
+        mb();
+        clflush_cache_range_opt(vaddr, size);
         mb();
 }
 EXPORT_SYMBOL_GPL(clflush_cache_range);
@@ -333,6 +336,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
         if (!cache)
                 return;
 
+        mb();
         for (i = 0; i < cpa->numpages; i++) {
                 unsigned long addr = __cpa_addr(cpa, i);
                 unsigned int level;
@@ -343,8 +347,9 @@ static void cpa_flush(struct cpa_data *data, int cache)
                  * Only flush present addresses:
                  */
                 if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-                        clflush_cache_range((void *)addr, PAGE_SIZE);
+                        clflush_cache_range_opt((void *)addr, PAGE_SIZE);
         }
+        mb();
 }
 
 static bool overlaps(unsigned long r1_start, unsigned long r1_end,
--
cgit v1.2.3-59-g8ed1b
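[ Editor's note: the win here is fence amortization: CLFLUSHOPT is unordered,
  so correctness only needs one MFENCE pair around the whole batch, not one
  around every page. A userspace sketch of the before/after shape, with printf
  standing in for mb() and clflushopt():

        #include <stdio.h>

        static void mfence(void)       { puts("MFENCE"); }
        static void clflushopt(int pg) { printf("CLFLUSHOPT page %d\n", pg); }

        /* old shape: a fully fenced helper per page => 2*n fences */
        static void flush_page_fenced(int pg)
        {
                mfence();
                clflushopt(pg);
                mfence();
        }

        /* new shape: unfenced inner helper, one fence pair per batch */
        static void flush_batch(int n)
        {
                mfence();
                for (int pg = 0; pg < n; pg++)
                        clflushopt(pg);
                mfence();
        }

        int main(void)
        {
                for (int pg = 0; pg < 3; pg++)  /* prints 6 fences */
                        flush_page_fenced(pg);
                flush_batch(3);                 /* prints 2 fences */
                return 0;
        }
]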
From 3c567356dbe0da4fc310cfcffafc39526e1ca43a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 3 Dec 2018 18:03:53 +0100
Subject: x86/mm/cpa: Rename @addrinarray to @numpages

The CPA_ARRAY interface works in single pages, and everywhere except
in these 'few' locations the variable is called 'numpages'.

Remove this 'addrinarray' aberration and use 'numpages' consistently.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.695039210@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 52 +++++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

(limited to 'arch/x86/mm/pageattr.c')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7d05149995dc..df4340c8e293 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1808,14 +1808,14 @@ out_err:
 }
 EXPORT_SYMBOL(set_memory_uc);
 
-static int _set_memory_array(unsigned long *addr, int addrinarray,
+static int _set_memory_array(unsigned long *addr, int numpages,
                 enum page_cache_mode new_type)
 {
         enum page_cache_mode set_type;
         int i, j;
         int ret;
 
-        for (i = 0; i < addrinarray; i++) {
+        for (i = 0; i < numpages; i++) {
                 ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
                                         new_type, NULL);
                 if (ret)
@@ -1826,11 +1826,11 @@ static int _set_memory_array(unsigned long *addr, int addrinarray,
         set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
                                 _PAGE_CACHE_MODE_UC_MINUS : new_type;
 
-        ret = change_page_attr_set(addr, addrinarray,
+        ret = change_page_attr_set(addr, numpages,
                                    cachemode2pgprot(set_type), 1);
 
         if (!ret && new_type == _PAGE_CACHE_MODE_WC)
-                ret = change_page_attr_set_clr(addr, addrinarray,
+                ret = change_page_attr_set_clr(addr, numpages,
                                                cachemode2pgprot(
                                                 _PAGE_CACHE_MODE_WC),
                                                __pgprot(_PAGE_CACHE_MASK),
@@ -1847,21 +1847,21 @@ out_free:
         return ret;
 }
 
-int set_memory_array_uc(unsigned long *addr, int addrinarray)
+int set_memory_array_uc(unsigned long *addr, int numpages)
 {
-        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
+        return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_UC_MINUS);
 }
 EXPORT_SYMBOL(set_memory_array_uc);
 
-int set_memory_array_wc(unsigned long *addr, int addrinarray)
+int set_memory_array_wc(unsigned long *addr, int numpages)
 {
-        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
+        return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WC);
 }
 EXPORT_SYMBOL(set_memory_array_wc);
 
-int set_memory_array_wt(unsigned long *addr, int addrinarray)
+int set_memory_array_wt(unsigned long *addr, int numpages)
 {
-        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT);
+        return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WT);
 }
 EXPORT_SYMBOL_GPL(set_memory_array_wt);
 
@@ -1941,18 +1941,18 @@ int set_memory_wb(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_wb);
 
-int set_memory_array_wb(unsigned long *addr, int addrinarray)
+int set_memory_array_wb(unsigned long *addr, int numpages)
 {
         int i;
         int ret;
 
         /* WB cache mode is hard wired to all cache attribute bits being 0 */
-        ret = change_page_attr_clear(addr, addrinarray,
+        ret = change_page_attr_clear(addr, numpages,
                               __pgprot(_PAGE_CACHE_MASK), 1);
         if (ret)
                 return ret;
 
-        for (i = 0; i < addrinarray; i++)
+        for (i = 0; i < numpages; i++)
                 free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
 
         return 0;
@@ -2082,7 +2082,7 @@ int set_pages_uc(struct page *page, int numpages)
 }
 EXPORT_SYMBOL(set_pages_uc);
 
-static int _set_pages_array(struct page **pages, int addrinarray,
+static int _set_pages_array(struct page **pages, int numpages,
                 enum page_cache_mode new_type)
 {
         unsigned long start;
@@ -2092,7 +2092,7 @@ static int _set_pages_array(struct page **pages, int addrinarray,
         int free_idx;
         int ret;
 
-        for (i = 0; i < addrinarray; i++) {
+        for (i = 0; i < numpages; i++) {
                 if (PageHighMem(pages[i]))
                         continue;
                 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
@@ -2105,10 +2105,10 @@ static int _set_pages_array(struct page **pages, int addrinarray,
         set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
                                 _PAGE_CACHE_MODE_UC_MINUS : new_type;
 
-        ret = cpa_set_pages_array(pages, addrinarray,
+        ret = cpa_set_pages_array(pages, numpages,
                                   cachemode2pgprot(set_type));
         if (!ret && new_type == _PAGE_CACHE_MODE_WC)
-                ret = change_page_attr_set_clr(NULL, addrinarray,
+                ret = change_page_attr_set_clr(NULL, numpages,
                                                cachemode2pgprot(
                                                 _PAGE_CACHE_MODE_WC),
                                                __pgprot(_PAGE_CACHE_MASK),
@@ -2128,21 +2128,21 @@ err_out:
         return -EINVAL;
 }
 
-int set_pages_array_uc(struct page **pages, int addrinarray)
+int set_pages_array_uc(struct page **pages, int numpages)
 {
-        return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
+        return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
 }
 EXPORT_SYMBOL(set_pages_array_uc);
 
-int set_pages_array_wc(struct page **pages, int addrinarray)
+int set_pages_array_wc(struct page **pages, int numpages)
 {
-        return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
+        return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
 }
 EXPORT_SYMBOL(set_pages_array_wc);
 
-int set_pages_array_wt(struct page **pages, int addrinarray)
+int set_pages_array_wt(struct page **pages, int numpages)
 {
-        return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT);
+        return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT);
 }
 EXPORT_SYMBOL_GPL(set_pages_array_wt);
 
@@ -2154,7 +2154,7 @@ int set_pages_wb(struct page *page, int numpages)
 }
 EXPORT_SYMBOL(set_pages_wb);
 
-int set_pages_array_wb(struct page **pages, int addrinarray)
+int set_pages_array_wb(struct page **pages, int numpages)
 {
         int retval;
         unsigned long start;
@@ -2162,12 +2162,12 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
         int i;
 
         /* WB cache mode is hard wired to all cache attribute bits being 0 */
-        retval = cpa_clear_pages_array(pages, addrinarray,
+        retval = cpa_clear_pages_array(pages, numpages,
                         __pgprot(_PAGE_CACHE_MASK));
         if (retval)
                 return retval;
 
-        for (i = 0; i < addrinarray; i++) {
+        for (i = 0; i < numpages; i++) {
                 if (PageHighMem(pages[i]))
                         continue;
                 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
--
cgit v1.2.3-59-g8ed1b