From 1e69a0efc0bd0e02b8327e7186fbb4a81878ea0b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 6 Dec 2019 12:50:16 +0100
Subject: perf/x86: Fix potential out-of-bounds access

UBSAN reported out-of-bounds accesses for x86_pmu.event_map(); its
argument must be < x86_pmu.max_events. Make sure all users observe
this constraint.

Reported-by: Meelis Roos
Signed-off-by: Peter Zijlstra (Intel)
Tested-by: Meelis Roos
---
 arch/x86/events/core.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 9a89d98c55bd..84fe1becbe26 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1642,9 +1642,12 @@ static struct attribute_group x86_pmu_format_group __ro_after_init = {
 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                           char *page)
 {
-       struct perf_pmu_events_attr *pmu_attr = \
+       struct perf_pmu_events_attr *pmu_attr =
                container_of(attr, struct perf_pmu_events_attr, attr);
-       u64 config = x86_pmu.event_map(pmu_attr->id);
+       u64 config = 0;
+
+       if (pmu_attr->id < x86_pmu.max_events)
+               config = x86_pmu.event_map(pmu_attr->id);
 
        /* string trumps id */
        if (pmu_attr->event_str)
@@ -1713,6 +1716,9 @@ is_visible(struct kobject *kobj, struct attribute *attr, int idx)
 {
        struct perf_pmu_events_attr *pmu_attr;
 
+       if (idx >= x86_pmu.max_events)
+               return 0;
+
        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
        /* str trumps id */
        return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0;
--
cgit v1.2.3-59-g8ed1b
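To make the fixed failure mode concrete, here is a minimal userspace sketch
of the invariant the patch enforces: x86_pmu.event_map() must never be called
with an id >= x86_pmu.max_events. Everything below is a stand-in, not kernel
code; the x86_pmu structure, the table contents, and safe_config() are
hypothetical and only mirror the shape of the fixed events_sysfs_show().

#include <stdint.h>
#include <stdio.h>

/* Illustrative event table; the values are made up. */
static uint64_t event_map(unsigned int id)
{
        static const uint64_t map[] = { 0x3c, 0xc0, 0x2e, 0xc4, 0xc5 };
        return map[id];         /* no bounds check, like x86_pmu.event_map() */
}

/* Hypothetical stand-in for the two x86_pmu fields the patch relies on. */
static struct {
        unsigned int max_events;
        uint64_t (*event_map)(unsigned int);
} x86_pmu = { 5, event_map };

/* Mirrors events_sysfs_show() after the fix: out-of-range ids yield 0. */
static uint64_t safe_config(unsigned int id)
{
        uint64_t config = 0;

        if (id < x86_pmu.max_events)
                config = x86_pmu.event_map(id);

        return config;
}

int main(void)
{
        printf("id 1  -> %#llx\n", (unsigned long long)safe_config(1));
        printf("id 42 -> %#llx\n", (unsigned long long)safe_config(42));
        return 0;
}

Before the fix, an id past the end of the table read whatever happened to sit
behind it, which is exactly the out-of-bounds access UBSAN flagged; returning
0 for out-of-range ids, as the patched events_sysfs_show() does, keeps the
sysfs path harmless.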
From ff61541cc6c1962957758ba433c574b76f588d23 Mon Sep 17 00:00:00 2001
From: Alexander Shishkin
Date: Thu, 5 Dec 2019 17:28:52 +0300
Subject: perf/x86/intel/bts: Fix the use of page_private()

Commit 8062382c8dbe2 ("perf/x86/intel/bts: Add BTS PMU driver")
introduced a warning in the BTS buffer initialization that is easily
tripped (assuming KPTI is disabled), instantly throwing:

> ------------[ cut here ]------------
> WARNING: CPU: 2 PID: 326 at arch/x86/events/intel/bts.c:86 bts_buffer_setup_aux+0x117/0x3d0
> Modules linked in:
> CPU: 2 PID: 326 Comm: perf Not tainted 5.4.0-rc8-00291-gceb9e77324fa #904
> RIP: 0010:bts_buffer_setup_aux+0x117/0x3d0
> Call Trace:
>  rb_alloc_aux+0x339/0x550
>  perf_mmap+0x607/0xc70
>  mmap_region+0x76b/0xbd0
...

The code appears to assume (for lost raisins) that PagePrivate() is
set, while later it actually tests for PagePrivate() before using
page_private().

Make it consistent and always check PagePrivate() before using
page_private().

Fixes: 8062382c8dbe2 ("perf/x86/intel/bts: Add BTS PMU driver")
Signed-off-by: Alexander Shishkin
Signed-off-by: Peter Zijlstra (Intel)
Cc: Jiri Olsa
Cc: Vince Weaver
Cc: Ingo Molnar
Cc: Arnaldo Carvalho de Melo
Link: https://lkml.kernel.org/r/20191205142853.28894-2-alexander.shishkin@linux.intel.com
---
 arch/x86/events/intel/bts.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 38de4a7f6752..6a3b599ee0fe 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -63,9 +63,17 @@ struct bts_buffer {
 
 static struct pmu bts_pmu;
 
+static int buf_nr_pages(struct page *page)
+{
+       if (!PagePrivate(page))
+               return 1;
+
+       return 1 << page_private(page);
+}
+
 static size_t buf_size(struct page *page)
 {
-       return 1 << (PAGE_SHIFT + page_private(page));
+       return buf_nr_pages(page) * PAGE_SIZE;
 }
 
 static void *
@@ -83,9 +91,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
        /* count all the high order buffers */
        for (pg = 0, nbuf = 0; pg < nr_pages;) {
                page = virt_to_page(pages[pg]);
-               if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
-                       return NULL;
-               pg += 1 << page_private(page);
+               pg += buf_nr_pages(page);
                nbuf++;
        }
 
@@ -109,7 +115,7 @@ bts_buffer_setup_aux(struct perf_event *event, void **pages,
                unsigned int __nr_pages;
 
                page = virt_to_page(pages[pg]);
-               __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
+               __nr_pages = buf_nr_pages(page);
                buf->buf[nbuf].page = page;
                buf->buf[nbuf].offset = offset;
                buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
--
cgit v1.2.3-59-g8ed1b
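The new buf_nr_pages() helper encodes one rule in a single place:
page_private() holds the allocation order only when PagePrivate() is set;
otherwise the buffer is a single page. Here is a small userspace sketch of
that rule, with a toy structure standing in for struct page (the toy_page
type, its field names, and TOY_PAGE_SIZE are illustrative assumptions, not
kernel API).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096

/* Toy stand-in for struct page: a flag plus a private word. */
struct toy_page {
        bool private_set;       /* models PagePrivate(page) */
        unsigned long order;    /* models page_private(page): allocation order */
};

/* Same shape as the kernel helper: consult order only when the flag is set. */
static int buf_nr_pages(const struct toy_page *page)
{
        if (!page->private_set)
                return 1;

        return 1 << page->order;
}

static size_t buf_size(const struct toy_page *page)
{
        return buf_nr_pages(page) * TOY_PAGE_SIZE;
}

int main(void)
{
        struct toy_page high_order = { true, 2 };   /* order-2: 4 pages */
        struct toy_page single = { false, 7 };      /* flag clear: order is garbage */

        printf("high-order buffer: %d pages, %zu bytes\n",
               buf_nr_pages(&high_order), buf_size(&high_order));
        printf("single page:       %d pages, %zu bytes\n",
               buf_nr_pages(&single), buf_size(&single));
        return 0;
}

The design point is that callers no longer need to remember the PagePrivate()
check themselves: an AUX buffer built from order-0 pages legitimately has the
flag clear, and the old open-coded uses either warned spuriously or read an
undefined private value.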
From 92ca7da4bdc24d63bb0bcd241c11441ddb63b80a Mon Sep 17 00:00:00 2001
From: Alexander Shishkin
Date: Tue, 10 Dec 2019 12:51:01 +0200
Subject: perf/x86/intel: Fix PT PMI handling

Commit ccbebba4c6bf ("perf/x86/intel/pt: Bypass PT vs. LBR exclusivity
if the core supports it") skips the PT/LBR exclusivity check on CPUs
where PT and LBRs coexist, but also inadvertently skips the
active_events bump for PT in that case, which is a bug. If there aren't
any hardware events at the same time as PT, the PMI handler will ignore
PT PMIs, as active_events reads zero in that case, resulting in the
"Uhhuh" spurious NMI warning and PT data loss.

Fix this by always increasing active_events for PT events.

Fixes: ccbebba4c6bf ("perf/x86/intel/pt: Bypass PT vs. LBR exclusivity if the core supports it")
Reported-by: Vitaly Slobodskoy
Signed-off-by: Alexander Shishkin
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Alexey Budankov
Cc: Jiri Olsa
Cc: Ingo Molnar
Cc: Arnaldo Carvalho de Melo
Link: https://lkml.kernel.org/r/20191210105101.77210-1-alexander.shishkin@linux.intel.com
---
 arch/x86/events/core.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 84fe1becbe26..f118af9f0718 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -376,7 +376,7 @@ int x86_add_exclusive(unsigned int what)
         * LBR and BTS are still mutually exclusive.
         */
        if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
-               return 0;
+               goto out;
 
        if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
                mutex_lock(&pmc_reserve_mutex);
@@ -388,6 +388,7 @@ int x86_add_exclusive(unsigned int what)
                mutex_unlock(&pmc_reserve_mutex);
        }
 
+out:
        atomic_inc(&active_events);
        return 0;
 
@@ -398,11 +399,15 @@ fail_unlock:
 
 void x86_del_exclusive(unsigned int what)
 {
+       atomic_dec(&active_events);
+
+       /*
+        * See the comment in x86_add_exclusive().
+        */
        if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
                return;
 
        atomic_dec(&x86_pmu.lbr_exclusive[what]);
-       atomic_dec(&active_events);
 }
 
 int x86_setup_perfctr(struct perf_event *event)
--
cgit v1.2.3-59-g8ed1b
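The shape of the fix is worth spelling out: every successful
x86_add_exclusive() must bump active_events, including the lbr_pt_coexist
early path, and x86_del_exclusive() must drop the counter unconditionally,
before its own early return. Below is a compressed userspace sketch of that
pairing; the bool flag and the stubbed-out bookkeeping are illustrative
assumptions, not the kernel functions themselves.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_events;        /* what the PMI handler checks */

/* Mirrors the fixed x86_add_exclusive(): the early path now reaches out:. */
static int add_exclusive(bool lbr_pt_coexist)
{
        if (lbr_pt_coexist)
                goto out;               /* was "return 0;" -- the bug */

        /* ... lbr_exclusive[] bookkeeping elided ... */
out:
        atomic_fetch_add(&active_events, 1);
        return 0;
}

/* Mirrors the fixed x86_del_exclusive(): the decrement moved above the exit. */
static void del_exclusive(bool lbr_pt_coexist)
{
        atomic_fetch_sub(&active_events, 1);

        if (lbr_pt_coexist)
                return;

        /* ... lbr_exclusive[] bookkeeping elided ... */
}

int main(void)
{
        add_exclusive(true);            /* a PT event on a PT/LBR-coexist CPU */
        printf("active_events while PT runs: %d\n", atomic_load(&active_events));
        del_exclusive(true);
        printf("active_events after teardown: %d\n", atomic_load(&active_events));
        return 0;
}

With the pre-fix early return, the first printf would read 0, which is
precisely the state in which the PMI handler dismisses a PT PMI as spurious.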