author		Peter Zijlstra <peterz@infradead.org>	2019-03-14 13:25:02 +0100
committer	Ingo Molnar <mingo@kernel.org>	2019-04-03 09:25:33 +0200
commit		f80deefa41890f9484802d7b67f11daf28055150 (patch)
tree		9ec0bdc7151388d2c479b257ca6d41c9264e1c8f /arch/x86/events
parent		perf/x86: Optimize x86_schedule_events() (diff)
perf/x86: Add sanity checks to x86_schedule_events()
By computing the 'committed' index earlier, we can use it to validate the
cached constraint state.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
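As background, here is a minimal userspace sketch of the invariant this patch enforces. It is an illustration only: struct cpu_hw_events below is a cut-down stand-in holding just the fields involved (n_events, n_txn, txn_flags, event_constraint), PERF_PMU_TXN_ADD is given a placeholder value, and committed_index()/constraint_cache_ok() are hypothetical helper names, not kernel functions.

#include <assert.h>
#include <stdbool.h>

#define PERF_PMU_TXN_ADD 0x1  /* placeholder value; stand-in for the kernel flag */
#define MAX_EVENTS 64

/* Cut-down stand-in for the fields of struct cpu_hw_events used here. */
struct cpu_hw_events {
	int n_events;                        /* committed events */
	int n_txn;                           /* events added in the open transaction */
	unsigned int txn_flags;
	void *event_constraint[MAX_EVENTS];  /* cached constraint, NULL if none */
};

/*
 * The 'committed' index the patch hoists to the top of
 * x86_schedule_events(): during an ADD transaction the last n_txn
 * entries of the event list are new, uncommitted events.
 */
static int committed_index(const struct cpu_hw_events *cpuc)
{
	int n0 = cpuc->n_events;

	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		n0 -= cpuc->n_txn;

	return n0;
}

/*
 * The invariant behind the new WARN_ON_ONCE(): events below n0 were
 * scheduled before and must carry a cached constraint; events at or
 * above n0 are new and must not.
 */
static bool constraint_cache_ok(const struct cpu_hw_events *cpuc, int i, int n0)
{
	void *c = cpuc->event_constraint[i];

	return !((c && i >= n0) || (!c && i < n0));
}

int main(void)
{
	struct cpu_hw_events cpuc = { .n_events = 3, .n_txn = 2,
				      .txn_flags = PERF_PMU_TXN_ADD };
	static int dummy;

	cpuc.event_constraint[0] = &dummy;   /* committed event: constraint cached */
	/* indices 1 and 2 are the two uncommitted events: no cached constraint */

	int n0 = committed_index(&cpuc);     /* 3 - 2 = 1 */

	assert(constraint_cache_ok(&cpuc, 0, n0));
	assert(constraint_cache_ok(&cpuc, 1, n0));
	assert(constraint_cache_ok(&cpuc, 2, n0));
	return 0;
}

With n0 hoisted to the top of x86_schedule_events(), one value feeds both the new WARN_ON_ONCE() and the existing loop in the else branch, which previously recomputed it inline.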
Diffstat (limited to 'arch/x86/events')
-rw-r--r--	arch/x86/events/core.c	32
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index fa88acf0daad..24dab9ad4fd6 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -849,11 +849,22 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	struct event_constraint *c;
 	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	struct perf_event *e;
-	int i, wmin, wmax, unsched = 0;
+	int n0, i, wmin, wmax, unsched = 0;
 	struct hw_perf_event *hwc;
 
 	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
+	/*
+	 * Compute the number of events already present; see x86_pmu_add(),
+	 * validate_group() and x86_pmu_commit_txn(). For the former two
+	 * cpuc->n_events hasn't been updated yet, while for the latter
+	 * cpuc->n_txn contains the number of events added in the current
+	 * transaction.
+	 */
+	n0 = cpuc->n_events;
+	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
+		n0 -= cpuc->n_txn;
+
 	if (x86_pmu.start_scheduling)
 		x86_pmu.start_scheduling(cpuc);
 
@@ -861,6 +872,12 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		c = cpuc->event_constraint[i];
 
 		/*
+		 * Previously scheduled events should have a cached constraint,
+		 * while new events should not have one.
+		 */
+		WARN_ON_ONCE((c && i >= n0) || (!c && i < n0));
+
+		/*
 		 * Request constraints for new events; or for those events that
 		 * have a dynamic constraint -- for those the constraint can
 		 * change due to external factors (sibling state, allow_tfa).
@@ -937,18 +954,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
 		}
 	} else {
-		/*
-		 * Compute the number of events already present; see
-		 * x86_pmu_add(), validate_group() and x86_pmu_commit_txn().
-		 * For the former two cpuc->n_events hasn't been updated yet,
-		 * while for the latter cpuc->n_txn contains the number of
-		 * events added in the current transaction.
-		 */
-		i = cpuc->n_events;
-		if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
-			i -= cpuc->n_txn;
-
-		for (; i < n; i++) {
+		for (i = n0; i < n; i++) {
 			e = cpuc->event_list[i];
 
 			/*