author	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-16 17:06:21 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-16 17:06:21 -0700
commit	772c1d06bd402f7ee72c61a18c2db74cd74b6758 (patch)
tree	e362fc7e158b3580d810a26189ecf91ec8a4f141 /arch/x86
parent	Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip (diff)
parent	kprobes: Prohibit probing on BUG() and WARN() address (diff)
download	linux-dev-772c1d06bd402f7ee72c61a18c2db74cd74b6758.tar.xz
	linux-dev-772c1d06bd402f7ee72c61a18c2db74cd74b6758.zip
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "Kernel side changes:

   - Improved kprobes robustness

   - Intel PEBS support for PT hardware tracing

   - Other Intel PT improvements: high order pages memory footprint
     reduction and various related cleanups

   - Misc cleanups

  The perf tooling side has been very busy in this cycle, with over 300
  commits. This is an incomplete high-level summary of the many
  improvements done by over 30 developers:

   - Lots of updates to the following tools:

      'perf c2c'
      'perf config'
      'perf record'
      'perf report'
      'perf script'
      'perf test'
      'perf top'
      'perf trace'

   - Updates to libperf and libtraceevent, and a consolidation of the
     proliferation of x86 instruction decoder libraries.

   - Vendor event updates for Intel and PowerPC CPUs,

   - Updates to hardware tracing tooling for ARM and Intel CPUs,

   - ... and lots of other changes and cleanups - see the shortlog and
     Git log for details"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (322 commits)
  kprobes: Prohibit probing on BUG() and WARN() address
  perf/x86: Make more stuff static
  x86, perf: Fix the dependency of the x86 insn decoder selftest
  objtool: Ignore intentional differences for the x86 insn decoder
  objtool: Update sync-check.sh from perf's check-headers.sh
  perf build: Ignore intentional differences for the x86 insn decoder
  perf intel-pt: Use shared x86 insn decoder
  perf intel-pt: Remove inat.c from build dependency list
  perf: Update .gitignore file
  objtool: Move x86 insn decoder to a common location
  perf metricgroup: Support multiple events for metricgroup
  perf metricgroup: Scale the metric result
  perf pmu: Change convert_scale from static to global
  perf symbols: Move mem_info and branch_info out of symbol.h
  perf auxtrace: Uninline functions that touch perf_session
  perf tools: Remove needless evlist.h include directives
  perf tools: Remove needless evlist.h include directives
  perf tools: Remove needless thread_map.h include directives
  perf tools: Remove needless thread.h include directives
  perf tools: Remove needless map.h include directives
  ...
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/Kconfig.debug	2
-rw-r--r--	arch/x86/events/core.c	34
-rw-r--r--	arch/x86/events/intel/core.c	18
-rw-r--r--	arch/x86/events/intel/cstate.c	4
-rw-r--r--	arch/x86/events/intel/ds.c	51
-rw-r--r--	arch/x86/events/intel/lbr.c	2
-rw-r--r--	arch/x86/events/intel/pt.c	330
-rw-r--r--	arch/x86/events/intel/pt.h	12
-rw-r--r--	arch/x86/events/intel/rapl.c	2
-rw-r--r--	arch/x86/events/msr.c	2
-rw-r--r--	arch/x86/events/perf_event.h	17
-rw-r--r--	arch/x86/include/asm/intel_pt.h	2
-rw-r--r--	arch/x86/include/asm/msr-index.h	4
13 files changed, 346 insertions(+), 134 deletions(-)
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 71c92db47c41..bf9cd83de777 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -171,7 +171,7 @@ config HAVE_MMIOTRACE_SUPPORT
config X86_DECODER_SELFTEST
bool "x86 instruction decoder selftest"
- depends on DEBUG_KERNEL && KPROBES
+ depends on DEBUG_KERNEL && INSTRUCTION_DECODER
depends on !COMPILE_TEST
---help---
Perform x86 instruction decoder selftests at build time.
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 325959d19d9a..15b90b1a8fb1 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1005,6 +1005,27 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
/* current number of events already accepted */
n = cpuc->n_events;
+ if (!cpuc->n_events)
+ cpuc->pebs_output = 0;
+
+ if (!cpuc->is_fake && leader->attr.precise_ip) {
+ /*
+ * For PEBS->PT, if !aux_event, the group leader (PT) went
+ * away, the group was broken down and this singleton event
+ * can't schedule any more.
+ */
+ if (is_pebs_pt(leader) && !leader->aux_event)
+ return -EINVAL;
+
+ /*
+ * pebs_output: 0: no PEBS so far, 1: PT, 2: DS
+ */
+ if (cpuc->pebs_output &&
+ cpuc->pebs_output != is_pebs_pt(leader) + 1)
+ return -EINVAL;
+
+ cpuc->pebs_output = is_pebs_pt(leader) + 1;
+ }
if (is_x86_event(leader)) {
if (n >= max_count)
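The collect_events() hunk above introduces per-CPU arbitration of the PEBS output destination: cpuc->pebs_output latches 0 (no PEBS yet), 1 (PT) or 2 (DS) when the first precise-ip group leader is accepted, and a later leader that wants the other destination gets -EINVAL. A minimal standalone C model of that latch (illustrative names, not kernel API; the kernel additionally skips the check for fake cpucs and for broken-up PEBS->PT groups):

#include <assert.h>

enum pebs_output { PEBS_NONE = 0, PEBS_PT = 1, PEBS_DS = 2 };

struct cpu_state { int n_events; enum pebs_output pebs_output; };

/* Return 0 if the event can be scheduled, -1 (-EINVAL in the kernel)
 * if it would mix PEBS-via-PT and PEBS-via-DS on one CPU. */
static int pebs_output_check(struct cpu_state *s, int is_pebs_pt_event)
{
	enum pebs_output want = is_pebs_pt_event ? PEBS_PT : PEBS_DS;

	if (!s->n_events)		/* first event resets the latch */
		s->pebs_output = PEBS_NONE;
	if (s->pebs_output != PEBS_NONE && s->pebs_output != want)
		return -1;
	s->pebs_output = want;
	s->n_events++;
	return 0;
}

int main(void)
{
	struct cpu_state s = { 0, PEBS_NONE };

	assert(pebs_output_check(&s, 1) == 0);	/* PT output claimed */
	assert(pebs_output_check(&s, 1) == 0);	/* same destination: fine */
	assert(pebs_output_check(&s, 0) == -1);	/* DS would conflict */
	return 0;
}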
@@ -2241,6 +2262,17 @@ static int x86_pmu_check_period(struct perf_event *event, u64 value)
return 0;
}
+static int x86_pmu_aux_output_match(struct perf_event *event)
+{
+ if (!(pmu.capabilities & PERF_PMU_CAP_AUX_OUTPUT))
+ return 0;
+
+ if (x86_pmu.aux_output_match)
+ return x86_pmu.aux_output_match(event);
+
+ return 0;
+}
+
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
@@ -2266,6 +2298,8 @@ static struct pmu pmu = {
.sched_task = x86_pmu_sched_task,
.task_ctx_size = sizeof(struct x86_perf_task_context),
.check_period = x86_pmu_check_period,
+
+ .aux_output_match = x86_pmu_aux_output_match,
};
void arch_perf_update_userpage(struct perf_event *event,
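x86_pmu_aux_output_match() above is a two-stage gate: the generic PERF_PMU_CAP_AUX_OUTPUT capability (set by intel_ds_init() further down when pebs_output_pt_available is present) is checked first, and only then does an optional vendor callback get the final say. A short standalone sketch of this capability-gated optional-hook pattern, with simplified stand-in types:

#include <stdio.h>

#define CAP_AUX_OUTPUT (1u << 0)

struct pmu_ops {
	unsigned int capabilities;
	int (*aux_output_match)(int event_type);	/* optional; may be NULL */
};

static int aux_output_match(const struct pmu_ops *ops, int event_type)
{
	if (!(ops->capabilities & CAP_AUX_OUTPUT))
		return 0;			/* hardware can't do it at all */
	if (ops->aux_output_match)
		return ops->aux_output_match(event_type);
	return 0;				/* capable, but no vendor policy */
}

static int intel_match(int event_type) { return event_type == 42; }

int main(void)
{
	struct pmu_ops intel = { CAP_AUX_OUTPUT, intel_match };
	struct pmu_ops other = { 0, NULL };

	printf("%d %d\n", aux_output_match(&intel, 42),
	       aux_output_match(&other, 42));	/* prints "1 0" */
	return 0;
}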
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e4c2cb65ea50..43c966d1208e 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -18,6 +18,7 @@
#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
+#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
@@ -3298,6 +3299,13 @@ static int intel_pmu_hw_config(struct perf_event *event)
}
}
+ if (event->attr.aux_output) {
+ if (!event->attr.precise_ip)
+ return -EINVAL;
+
+ event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
+ }
+
if (event->attr.type != PERF_TYPE_RAW)
return 0;
@@ -3816,6 +3824,14 @@ static int intel_pmu_check_period(struct perf_event *event, u64 value)
return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
}
+static int intel_pmu_aux_output_match(struct perf_event *event)
+{
+ if (!x86_pmu.intel_cap.pebs_output_pt_available)
+ return 0;
+
+ return is_intel_pt_event(event);
+}
+
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3940,6 +3956,8 @@ static __initconst const struct x86_pmu intel_pmu = {
.sched_task = intel_pmu_sched_task,
.check_period = intel_pmu_check_period,
+
+ .aux_output_match = intel_pmu_aux_output_match,
};
static __init void intel_clovertown_quirk(void)
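With intel_pmu_hw_config() and intel_pmu_aux_output_match() in place, userspace can group a PEBS event under an Intel PT leader and set perf_event_attr.aux_output so the PEBS records are written into the PT AUX buffer. A minimal perf_event_open() sketch, assuming Linux 5.4+ UAPI headers and a CPU whose PMU advertises pebs_output_pt_available (error handling trimmed):

#include <linux/perf_event.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr pt = { 0 }, pebs = { 0 };
	int pt_type, pt_fd, pebs_fd;
	FILE *f = fopen("/sys/bus/event_source/devices/intel_pt/type", "r");

	if (!f || fscanf(f, "%d", &pt_type) != 1)
		return 1;			/* no intel_pt PMU */
	fclose(f);

	/* group leader: Intel PT */
	pt.type = pt_type;
	pt.size = sizeof(pt);
	pt.disabled = 1;
	pt_fd = perf_event_open(&pt, 0, -1, -1, 0);

	/* PEBS event directing its records into the leader's AUX buffer */
	pebs.type = PERF_TYPE_HARDWARE;
	pebs.size = sizeof(pebs);
	pebs.config = PERF_COUNT_HW_CPU_CYCLES;
	pebs.sample_period = 100000;
	pebs.precise_ip = 1;			/* aux_output requires PEBS */
	pebs.aux_output = 1;			/* new in this cycle */
	pebs_fd = perf_event_open(&pebs, 0, -1, pt_fd, 0);
	if (pebs_fd < 0)
		perror("aux_output rejected (no PEBS-via-PT?)");

	return pebs_fd < 0;
}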
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 688592b34564..db498b5d4aae 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -446,7 +446,7 @@ static int cstate_cpu_init(unsigned int cpu)
return 0;
}
-const struct attribute_group *core_attr_update[] = {
+static const struct attribute_group *core_attr_update[] = {
&group_cstate_core_c1,
&group_cstate_core_c3,
&group_cstate_core_c6,
@@ -454,7 +454,7 @@ const struct attribute_group *core_attr_update[] = {
NULL,
};
-const struct attribute_group *pkg_attr_update[] = {
+static const struct attribute_group *pkg_attr_update[] = {
&group_cstate_pkg_c2,
&group_cstate_pkg_c3,
&group_cstate_pkg_c6,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index f1269e804e9b..ce83950036c5 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -902,6 +902,9 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
*/
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
+ if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
+ return false;
+
return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}
@@ -919,6 +922,9 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
u64 threshold;
int reserved;
+ if (cpuc->n_pebs_via_pt)
+ return;
+
if (x86_pmu.flags & PMU_FL_PEBS_ALL)
reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
else
@@ -1059,10 +1065,40 @@ void intel_pmu_pebs_add(struct perf_event *event)
cpuc->n_pebs++;
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs++;
+ if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
+ cpuc->n_pebs_via_pt++;
pebs_update_state(needed_cb, cpuc, event, true);
}
+static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ if (!is_pebs_pt(event))
+ return;
+
+ if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
+ cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
+}
+
+static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ struct debug_store *ds = cpuc->ds;
+
+ if (!is_pebs_pt(event))
+ return;
+
+ if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
+ cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;
+
+ cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
+
+ wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
+}
+
void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1100,6 +1136,8 @@ void intel_pmu_pebs_enable(struct perf_event *event)
} else {
ds->pebs_event_reset[hwc->idx] = 0;
}
+
+ intel_pmu_pebs_via_pt_enable(event);
}
void intel_pmu_pebs_del(struct perf_event *event)
@@ -1111,6 +1149,8 @@ void intel_pmu_pebs_del(struct perf_event *event)
cpuc->n_pebs--;
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs--;
+ if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
+ cpuc->n_pebs_via_pt--;
pebs_update_state(needed_cb, cpuc, event, false);
}
@@ -1120,7 +1160,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
- if (cpuc->n_pebs == cpuc->n_large_pebs)
+ if (cpuc->n_pebs == cpuc->n_large_pebs &&
+ cpuc->n_pebs != cpuc->n_pebs_via_pt)
intel_pmu_drain_pebs_buffer();
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -1131,6 +1172,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
cpuc->pebs_enabled &= ~(1ULL << 63);
+ intel_pmu_pebs_via_pt_disable(event);
+
if (cpuc->enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
@@ -2031,6 +2074,12 @@ void __init intel_ds_init(void)
PERF_SAMPLE_REGS_INTR);
}
pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+
+ if (x86_pmu.intel_cap.pebs_output_pt_available) {
+ pr_cont("PEBS-via-PT, ");
+ x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+ }
+
break;
default:
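The ds.c changes above thread n_pebs_via_pt through the PEBS bookkeeping: the DS threshold update and the flush-before-disable are skipped when the records are going to the PT AUX buffer instead of the DS area. A tiny standalone model of the new drain condition in intel_pmu_pebs_disable() (illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct pebs_counts { int n_pebs, n_large_pebs, n_pebs_via_pt; };

/* Mirrors the patched condition: flush the DS buffer only when every
 * PEBS event is a large-PEBS one AND they are not all routed to PT
 * (a drain of the DS area can't see PEBS-via-PT records anyway). */
static bool needs_ds_drain(const struct pebs_counts *c)
{
	return c->n_pebs == c->n_large_pebs && c->n_pebs != c->n_pebs_via_pt;
}

int main(void)
{
	struct pebs_counts all_ds = { 2, 2, 0 };
	struct pebs_counts all_pt = { 2, 2, 2 };

	printf("%d %d\n", needs_ds_drain(&all_ds), needs_ds_drain(&all_pt));
	/* prints "1 0": nothing to drain when all PEBS data goes to PT */
	return 0;
}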
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 6f814a27416b..ea54634eabf3 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -273,7 +273,7 @@ static inline bool lbr_from_signext_quirk_needed(void)
return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}
-DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
+static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index d3dc2274ddd4..b1bb4d28e0be 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -545,33 +545,62 @@ static void pt_config_buffer(void *buf, unsigned int topa_idx,
wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}
-/*
- * Keep ToPA table-related metadata on the same page as the actual table,
- * taking up a few words from the top
- */
-
-#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
-
/**
- * struct topa - page-sized ToPA table with metadata at the top
- * @table: actual ToPA table entries, as understood by PT hardware
+ * struct topa - ToPA metadata
* @list: linkage to struct pt_buffer's list of tables
- * @phys: physical address of this page
* @offset: offset of the first entry in this table in the buffer
* @size: total size of all entries in this table
* @last: index of the last initialized entry in this table
+ * @z_count: how many times the first entry repeats
*/
struct topa {
- struct topa_entry table[TENTS_PER_PAGE];
struct list_head list;
- u64 phys;
u64 offset;
size_t size;
int last;
+ unsigned int z_count;
};
+/*
+ * Keep ToPA table-related metadata on the same page as the actual table,
+ * taking up a few words from the top
+ */
+
+#define TENTS_PER_PAGE \
+ ((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))
+
+/**
+ * struct topa_page - page-sized ToPA table with metadata at the top
+ * @table: actual ToPA table entries, as understood by PT hardware
+ * @topa: metadata
+ */
+struct topa_page {
+ struct topa_entry table[TENTS_PER_PAGE];
+ struct topa topa;
+};
+
+static inline struct topa_page *topa_to_page(struct topa *topa)
+{
+ return container_of(topa, struct topa_page, topa);
+}
+
+static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
+{
+ return (struct topa_page *)((unsigned long)te & PAGE_MASK);
+}
+
+static inline phys_addr_t topa_pfn(struct topa *topa)
+{
+ return PFN_DOWN(virt_to_phys(topa_to_page(topa)));
+}
+
/* make -1 stand for the last table entry */
-#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])
+#define TOPA_ENTRY(t, i) \
+ ((i) == -1 \
+ ? &topa_to_page(t)->table[(t)->last] \
+ : &topa_to_page(t)->table[(i)])
+#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
+#define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)
/**
* topa_alloc() - allocate page-sized ToPA table
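The restructuring above splits the old struct topa (ToPA entries plus metadata in one page) into a struct topa_page whose metadata sits after the entry array. That makes both direction changes cheap: topa_to_page() recovers the page from a metadata pointer with container_of(), and topa_entry_to_page() recovers it from any entry pointer by masking the low address bits. A self-contained userspace model of the same trick, with simplified stand-in types and a hard-coded 4 KiB page (assumptions, not the kernel's definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entry { uint64_t base : 36, size : 4, rsvd : 24; };	/* 8 bytes */
struct meta  { int last; unsigned int z_count; };		/* 8 bytes */

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct meta)) / sizeof(struct entry))

struct table_page {		/* exactly one page: 511 * 8 + 8 = 4096 */
	struct entry table[ENTRIES_PER_PAGE];
	struct meta  meta;
};

int main(void)
{
	struct table_page *tp = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!tp)
		return 1;

	/* metadata -> page, like topa_to_page() */
	struct table_page *from_meta =
		container_of(&tp->meta, struct table_page, meta);
	/* entry -> page, like topa_entry_to_page() */
	struct table_page *from_te =
		(struct table_page *)((uintptr_t)&tp->table[5] & PAGE_MASK);

	printf("%d %d\n", from_meta == tp, from_te == tp);	/* "1 1" */
	free(tp);
	return 0;
}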
@@ -583,27 +612,26 @@ struct topa {
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
int node = cpu_to_node(cpu);
- struct topa *topa;
+ struct topa_page *tp;
struct page *p;
p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
if (!p)
return NULL;
- topa = page_address(p);
- topa->last = 0;
- topa->phys = page_to_phys(p);
+ tp = page_address(p);
+ tp->topa.last = 0;
/*
* In case of single-entry ToPA, always put the self-referencing END
* link as the 2nd entry in the table
*/
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
- TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
- TOPA_ENTRY(topa, 1)->end = 1;
+ TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
+ TOPA_ENTRY(&tp->topa, 1)->end = 1;
}
- return topa;
+ return &tp->topa;
}
/**
@@ -643,7 +671,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
BUG_ON(last->last != TENTS_PER_PAGE - 1);
- TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(last, -1)->base = topa_pfn(topa);
TOPA_ENTRY(last, -1)->end = 1;
}
@@ -670,7 +698,7 @@ static bool topa_table_full(struct topa *topa)
*
* Return: 0 on success or error code.
*/
-static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
+static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
{
struct topa *topa = buf->last;
int order = 0;
@@ -681,13 +709,18 @@ static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
order = page_private(p);
if (topa_table_full(topa)) {
- topa = topa_alloc(buf->cpu, gfp);
+ topa = topa_alloc(cpu, gfp);
if (!topa)
return -ENOMEM;
topa_insert_table(buf, topa);
}
+ if (topa->z_count == topa->last - 1) {
+ if (order == TOPA_ENTRY(topa, topa->last - 1)->size)
+ topa->z_count++;
+ }
+
TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
TOPA_ENTRY(topa, -1)->size = order;
if (!buf->snapshot &&
@@ -713,23 +746,26 @@ static void pt_topa_dump(struct pt_buffer *buf)
struct topa *topa;
list_for_each_entry(topa, &buf->tables, list) {
+ struct topa_page *tp = topa_to_page(topa);
int i;
- pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
- topa->phys, topa->offset, topa->size);
+ pr_debug("# table @%p, off %llx size %zx\n", tp->table,
+ topa->offset, topa->size);
for (i = 0; i < TENTS_PER_PAGE; i++) {
pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
- &topa->table[i],
- (unsigned long)topa->table[i].base << TOPA_SHIFT,
- sizes(topa->table[i].size),
- topa->table[i].end ? 'E' : ' ',
- topa->table[i].intr ? 'I' : ' ',
- topa->table[i].stop ? 'S' : ' ',
- *(u64 *)&topa->table[i]);
+ &tp->table[i],
+ (unsigned long)tp->table[i].base << TOPA_SHIFT,
+ sizes(tp->table[i].size),
+ tp->table[i].end ? 'E' : ' ',
+ tp->table[i].intr ? 'I' : ' ',
+ tp->table[i].stop ? 'S' : ' ',
+ *(u64 *)&tp->table[i]);
if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
- topa->table[i].stop) ||
- topa->table[i].end)
+ tp->table[i].stop) ||
+ tp->table[i].end)
break;
+ if (!i && topa->z_count)
+ i += topa->z_count;
}
}
}
@@ -771,7 +807,7 @@ static void pt_update_head(struct pt *pt)
/* offset of the current output region within this table */
for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
- base += sizes(buf->cur->table[topa_idx].size);
+ base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);
if (buf->snapshot) {
local_set(&buf->data_size, base);
@@ -791,7 +827,7 @@ static void pt_update_head(struct pt *pt)
*/
static void *pt_buffer_region(struct pt_buffer *buf)
{
- return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
+ return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
}
/**
@@ -800,7 +836,7 @@ static void *pt_buffer_region(struct pt_buffer *buf)
*/
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
- return sizes(buf->cur->table[buf->cur_idx].size);
+ return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
}
/**
@@ -830,7 +866,7 @@ static void pt_handle_status(struct pt *pt)
* know.
*/
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
- buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
+ buf->output_off == pt_buffer_region_size(buf)) {
perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED);
advance++;
@@ -868,9 +904,11 @@ static void pt_handle_status(struct pt *pt)
static void pt_read_offset(struct pt_buffer *buf)
{
u64 offset, base_topa;
+ struct topa_page *tp;
rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
- buf->cur = phys_to_virt(base_topa);
+ tp = phys_to_virt(base_topa);
+ buf->cur = &tp->topa;
rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
/* offset within current output region */
@@ -879,29 +917,97 @@ static void pt_read_offset(struct pt_buffer *buf)
buf->cur_idx = (offset & 0xffffff80) >> 7;
}
-/**
- * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
- * @buf: PT buffer.
- * @pg: Page offset in the buffer.
- *
- * When advancing to the next output region (ToPA entry), given a page offset
- * into the buffer, we need to find the offset of the first page in the next
- * region.
- */
-static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
+static struct topa_entry *
+pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
{
- struct topa_entry *te = buf->topa_index[pg];
+ struct topa_page *tp;
+ struct topa *topa;
+ unsigned int idx, cur_pg = 0, z_pg = 0, start_idx = 0;
- /* one region */
- if (buf->first == buf->last && buf->first->last == 1)
- return pg;
+ /*
+ * Indicates a bug in the caller.
+ */
+ if (WARN_ON_ONCE(pg >= buf->nr_pages))
+ return NULL;
+
+ /*
+ * First, find the ToPA table where @pg fits. With high
+ * order allocations, there shouldn't be many of these.
+ */
+ list_for_each_entry(topa, &buf->tables, list) {
+ if (topa->offset + topa->size > pg << PAGE_SHIFT)
+ goto found;
+ }
+
+ /*
+ * Hitting this means we have a problem in the ToPA
+ * allocation code.
+ */
+ WARN_ON_ONCE(1);
- do {
- pg++;
- pg &= buf->nr_pages - 1;
- } while (buf->topa_index[pg] == te);
+ return NULL;
- return pg;
+found:
+ /*
+ * Indicates a problem in the ToPA allocation code.
+ */
+ if (WARN_ON_ONCE(topa->last == -1))
+ return NULL;
+
+ tp = topa_to_page(topa);
+ cur_pg = PFN_DOWN(topa->offset);
+ if (topa->z_count) {
+ z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1);
+ start_idx = topa->z_count + 1;
+ }
+
+ /*
+ * Multiple entries at the beginning of the table have the same size,
+ * ideally all of them; if @pg falls there, the search is done.
+ */
+ if (pg >= cur_pg && pg < cur_pg + z_pg) {
+ idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0);
+ return &tp->table[idx];
+ }
+
+ /*
+ * Otherwise, slow path: iterate through the remaining entries.
+ */
+ for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) {
+ if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg)
+ return &tp->table[idx];
+
+ cur_pg += TOPA_ENTRY_PAGES(topa, idx);
+ }
+
+ /*
+ * Means we couldn't find a ToPA entry in the table that does match.
+ */
+ WARN_ON_ONCE(1);
+
+ return NULL;
+}
+
+static struct topa_entry *
+pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te)
+{
+ unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
+ struct topa_page *tp;
+ struct topa *topa;
+
+ tp = (struct topa_page *)table;
+ if (tp->table != te)
+ return --te;
+
+ topa = &tp->topa;
+ if (topa == buf->first)
+ topa = buf->last;
+ else
+ topa = list_prev_entry(topa, list);
+
+ tp = topa_to_page(topa);
+
+ return &tp->table[topa->last - 1];
}
/**
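pt_topa_entry_for_page() above replaces the old page-indexed reverse-lookup array (topa_index): it walks the short list of ToPA tables to find the one covering @pg, then uses z_count — how many times the first entry's region size repeats — so the common all-same-size prefix of a table is resolved with one division instead of an entry-by-entry walk. A worked example of that fast-path arithmetic (values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int entry_pages = 4;	/* each ToPA entry covers 4 pages */
	unsigned int z_count = 7;	/* first entry's size repeats 7 times */
	unsigned int z_pg = entry_pages * (z_count + 1);	/* 32 pages */
	unsigned int pg = 13;		/* page offset within this table */

	if (pg < z_pg)			/* fast path: one division */
		printf("entry index = %u\n", pg / entry_pages);	/* 3 */
	else				/* slow path in the kernel code */
		printf("walk entries starting at index %u\n", z_count + 1);
	return 0;
}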
@@ -925,8 +1031,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
unsigned long idx, npages, wakeup;
/* can't stop in the middle of an output region */
- if (buf->output_off + handle->size + 1 <
- sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
+ if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
return -EINVAL;
}
@@ -937,9 +1042,13 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
return 0;
/* clear STOP and INT from current entry */
- buf->topa_index[buf->stop_pos]->stop = 0;
- buf->topa_index[buf->stop_pos]->intr = 0;
- buf->topa_index[buf->intr_pos]->intr = 0;
+ if (buf->stop_te) {
+ buf->stop_te->stop = 0;
+ buf->stop_te->intr = 0;
+ }
+
+ if (buf->intr_te)
+ buf->intr_te->intr = 0;
/* how many pages till the STOP marker */
npages = handle->size >> PAGE_SHIFT;
@@ -950,7 +1059,12 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
idx = (head >> PAGE_SHIFT) + npages;
idx &= buf->nr_pages - 1;
- buf->stop_pos = idx;
+
+ if (idx != buf->stop_pos) {
+ buf->stop_pos = idx;
+ buf->stop_te = pt_topa_entry_for_page(buf, idx);
+ buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te);
+ }
wakeup = handle->wakeup >> PAGE_SHIFT;
@@ -960,51 +1074,20 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
idx = wakeup;
idx &= buf->nr_pages - 1;
- buf->intr_pos = idx;
+ if (idx != buf->intr_pos) {
+ buf->intr_pos = idx;
+ buf->intr_te = pt_topa_entry_for_page(buf, idx);
+ buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te);
+ }
- buf->topa_index[buf->stop_pos]->stop = 1;
- buf->topa_index[buf->stop_pos]->intr = 1;
- buf->topa_index[buf->intr_pos]->intr = 1;
+ buf->stop_te->stop = 1;
+ buf->stop_te->intr = 1;
+ buf->intr_te->intr = 1;
return 0;
}
/**
- * pt_buffer_setup_topa_index() - build topa_index[] table of regions
- * @buf: PT buffer.
- *
- * topa_index[] references output regions indexed by offset into the
- * buffer for purposes of quick reverse lookup.
- */
-static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
-{
- struct topa *cur = buf->first, *prev = buf->last;
- struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
- *te_prev = TOPA_ENTRY(prev, prev->last - 1);
- int pg = 0, idx = 0;
-
- while (pg < buf->nr_pages) {
- int tidx;
-
- /* pages within one topa entry */
- for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
- buf->topa_index[pg] = te_prev;
-
- te_prev = te_cur;
-
- if (idx == cur->last - 1) {
- /* advance to next topa table */
- idx = 0;
- cur = list_entry(cur->list.next, struct topa, list);
- } else {
- idx++;
- }
- te_cur = TOPA_ENTRY(cur, idx);
- }
-
-}
-
-/**
* pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
* @buf: PT buffer.
* @head: Write pointer (aux_head) from AUX buffer.
@@ -1021,18 +1104,20 @@ static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
*/
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
+ struct topa_page *cur_tp;
+ struct topa_entry *te;
int pg;
if (buf->snapshot)
head &= (buf->nr_pages << PAGE_SHIFT) - 1;
pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
- pg = pt_topa_next_entry(buf, pg);
+ te = pt_topa_entry_for_page(buf, pg);
- buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
- buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
- (unsigned long)buf->cur) / sizeof(struct topa_entry);
- buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);
+ cur_tp = topa_entry_to_page(te);
+ buf->cur = &cur_tp->topa;
+ buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
+ buf->output_off = head & (pt_buffer_region_size(buf) - 1);
local64_set(&buf->head, head);
local_set(&buf->data_size, 0);
@@ -1061,31 +1146,29 @@ static void pt_buffer_fini_topa(struct pt_buffer *buf)
* @size: Total size of all regions within this ToPA.
* @gfp: Allocation flags.
*/
-static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
- gfp_t gfp)
+static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
+ unsigned long nr_pages, gfp_t gfp)
{
struct topa *topa;
int err;
- topa = topa_alloc(buf->cpu, gfp);
+ topa = topa_alloc(cpu, gfp);
if (!topa)
return -ENOMEM;
topa_insert_table(buf, topa);
while (buf->nr_pages < nr_pages) {
- err = topa_insert_pages(buf, gfp);
+ err = topa_insert_pages(buf, cpu, gfp);
if (err) {
pt_buffer_fini_topa(buf);
return -ENOMEM;
}
}
- pt_buffer_setup_topa_index(buf);
-
/* link last table to the first one, unless we're double buffering */
if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
- TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
+ TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first);
TOPA_ENTRY(buf->last, -1)->end = 1;
}
@@ -1119,18 +1202,18 @@ pt_buffer_setup_aux(struct perf_event *event, void **pages,
cpu = raw_smp_processor_id();
node = cpu_to_node(cpu);
- buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
- GFP_KERNEL, node);
+ buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
if (!buf)
return NULL;
- buf->cpu = cpu;
buf->snapshot = snapshot;
buf->data_pages = pages;
+ buf->stop_pos = -1;
+ buf->intr_pos = -1;
INIT_LIST_HEAD(&buf->tables);
- ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
+ ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
if (ret) {
kfree(buf);
return NULL;
@@ -1296,7 +1379,7 @@ void intel_pt_interrupt(void)
return;
}
- pt_config_buffer(buf->cur->table, buf->cur_idx,
+ pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
buf->output_off);
pt_config(event);
}
@@ -1361,7 +1444,7 @@ static void pt_event_start(struct perf_event *event, int mode)
WRITE_ONCE(pt->handle_nmi, 1);
hwc->state = 0;
- pt_config_buffer(buf->cur->table, buf->cur_idx,
+ pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx,
buf->output_off);
pt_config(event);
@@ -1481,6 +1564,11 @@ void cpu_emergency_stop_pt(void)
pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}
+int is_intel_pt_event(struct perf_event *event)
+{
+ return event->pmu == &pt_pmu.pmu;
+}
+
static __init int pt_init(void)
{
int ret, cpu, prior_warn = 0;
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 63fe4063fbd6..1d2bb7572374 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -53,7 +53,6 @@ struct pt_pmu {
/**
* struct pt_buffer - buffer configuration; one buffer per task_struct or
* cpu, depending on perf event configuration
- * @cpu: cpu for per-cpu allocation
* @tables: list of ToPA tables in this buffer
* @first: shorthand for first topa table
* @last: shorthand for last topa table
@@ -65,13 +64,14 @@ struct pt_pmu {
* @lost: if data was lost/truncated
* @head: logical write offset inside the buffer
* @snapshot: if this is for a snapshot/overwrite counter
- * @stop_pos: STOP topa entry in the buffer
- * @intr_pos: INT topa entry in the buffer
+ * @stop_pos: STOP topa entry index
+ * @intr_pos: INT topa entry index
+ * @stop_te: STOP topa entry pointer
+ * @intr_te: INT topa entry pointer
* @data_pages: array of pages from perf
* @topa_index: table of topa entries indexed by page offset
*/
struct pt_buffer {
- int cpu;
struct list_head tables;
struct topa *first, *last, *cur;
unsigned int cur_idx;
@@ -80,9 +80,9 @@ struct pt_buffer {
local_t data_size;
local64_t head;
bool snapshot;
- unsigned long stop_pos, intr_pos;
+ long stop_pos, intr_pos;
+ struct topa_entry *stop_te, *intr_te;
void **data_pages;
- struct topa_entry *topa_index[0];
};
#define PT_FILTERS_NUM 4
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 64ab51ffdf06..f34b9491ea6e 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -634,7 +634,7 @@ static void cleanup_rapl_pmus(void)
kfree(rapl_pmus);
}
-const struct attribute_group *rapl_attr_update[] = {
+static const struct attribute_group *rapl_attr_update[] = {
&rapl_events_cores_group,
&rapl_events_pkg_group,
&rapl_events_ram_group,
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 9431447541e9..5812e8747d1f 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -167,7 +167,7 @@ static const struct attribute_group *attr_groups[] = {
NULL,
};
-const struct attribute_group *attr_update[] = {
+static const struct attribute_group *attr_update[] = {
&group_aperf,
&group_mperf,
&group_pperf,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 8751008fc170..ecacfbf4ebc1 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -76,6 +76,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
#define PERF_X86_EVENT_EXCL_ACCT 0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */
+#define PERF_X86_EVENT_PEBS_VIA_PT 0x0800 /* use PT buffer for PEBS */
struct amd_nb {
int nb_id; /* NorthBridge id */
@@ -85,6 +86,11 @@ struct amd_nb {
};
#define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1)
+#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
+#define PEBS_OUTPUT_OFFSET 61
+#define PEBS_OUTPUT_MASK (3ull << PEBS_OUTPUT_OFFSET)
+#define PEBS_OUTPUT_PT (1ull << PEBS_OUTPUT_OFFSET)
+#define PEBS_VIA_PT_MASK (PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)
/*
* Flags PEBS can handle without a PMI.
@@ -211,6 +217,8 @@ struct cpu_hw_events {
u64 pebs_enabled;
int n_pebs;
int n_large_pebs;
+ int n_pebs_via_pt;
+ int pebs_output;
/* Current super set of events hardware configuration */
u64 pebs_data_cfg;
@@ -510,6 +518,8 @@ union perf_capabilities {
*/
u64 full_width_write:1;
u64 pebs_baseline:1;
+ u64 pebs_metrics_available:1;
+ u64 pebs_output_pt_available:1;
};
u64 capabilities;
};
@@ -692,6 +702,8 @@ struct x86_pmu {
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
*/
int (*check_period) (struct perf_event *event, u64 period);
+
+ int (*aux_output_match) (struct perf_event *event);
};
struct x86_perf_task_context {
@@ -901,6 +913,11 @@ static inline int amd_pmu_init(void)
#endif /* CONFIG_CPU_SUP_AMD */
+static inline int is_pebs_pt(struct perf_event *event)
+{
+ return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
+}
+
#ifdef CONFIG_CPU_SUP_INTEL
static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
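The new MSR_IA32_PEBS_ENABLE control bits defined above — bit 60 requesting a PMI after each record, and the two-bit output field at bits 62:61 where PEBS_OUTPUT_PT selects the PT destination — are easy to sanity-check in a standalone program:

#include <inttypes.h>
#include <stdio.h>

#define BIT_ULL(n)			(1ULL << (n))
#define PEBS_PMI_AFTER_EACH_RECORD	BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET		61
#define PEBS_OUTPUT_MASK		(3ULL << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT			(1ULL << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK		(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)

int main(void)
{
	printf("PMI_AFTER_EACH_RECORD = %016" PRIx64 "\n",
	       (uint64_t)PEBS_PMI_AFTER_EACH_RECORD);
	printf("OUTPUT_PT             = %016" PRIx64 "\n",
	       (uint64_t)PEBS_OUTPUT_PT);
	printf("VIA_PT_MASK           = %016" PRIx64 "\n",
	       (uint64_t)PEBS_VIA_PT_MASK);
	/* 1000000000000000 / 2000000000000000 / 3000000000000000 */
	return 0;
}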
diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h
index 634f99b1dc22..423b788f495e 100644
--- a/arch/x86/include/asm/intel_pt.h
+++ b/arch/x86/include/asm/intel_pt.h
@@ -28,10 +28,12 @@ enum pt_capabilities {
void cpu_emergency_stop_pt(void);
extern u32 intel_pt_validate_hw_cap(enum pt_capabilities cap);
extern u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities cap);
+extern int is_intel_pt_event(struct perf_event *event);
#else
static inline void cpu_emergency_stop_pt(void) {}
static inline u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) { return 0; }
static inline u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability) { return 0; }
+static inline int is_intel_pt_event(struct perf_event *event) { return 0; }
#endif
#endif /* _ASM_X86_INTEL_PT_H */
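is_intel_pt_event() above follows the usual header stub convention: when Intel PT support is compiled out, a static inline returning 0 keeps callers such as intel_pmu_aux_output_match() free of #ifdefs. A trivial standalone sketch of the pattern (hypothetical feature name):

#include <stdio.h>

#define CONFIG_FEATURE 0	/* flip to 1 to "compile the feature in" */

#if CONFIG_FEATURE
int feature_event_match(int id) { return id == 7; }
#else
static inline int feature_event_match(int id) { (void)id; return 0; }
#endif

int main(void)
{
	/* with CONFIG_FEATURE 0 this prints 0; no #ifdef at the call site */
	printf("%d\n", feature_event_match(7));
	return 0;
}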
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 271d837d69a8..de753206b427 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -375,6 +375,10 @@
/* Alternative perfctr range with full access. */
#define MSR_IA32_PMC0 0x000004c1
+/* Auto-reload via MSR instead of DS area */
+#define MSR_RELOAD_PMC0 0x000014c1
+#define MSR_RELOAD_FIXED_CTR0 0x00001309
+
/* AMD64 MSRs. Not complete. See the architecture manual for a more
complete list. */