author		2025-01-13 11:43:45 -0800
committer	2025-02-12 20:06:11 -0800
commit		dc6d2bc2d893a878e7b58578ff01b4738708deb4 (patch)
tree		b5524ff4429f1e88533f518753091462bc4bd8b4 /tools/perf/util/synthetic-events.c
parent		perf test stat_all_metrics: Ensure missing events fail test (diff)
perf sample: Make user_regs and intr_regs optional
The struct regs_dump contains 512 bytes of cache_regs, meaning the two
copies embedded in perf_sample contribute 1088 bytes of its total
1384-byte size. Initializing this much memory has a cost reported by Tavian
Barnes <tavianator@tavianator.com> as about 2.5% when running `perf
script --itrace=i0`:
https://lore.kernel.org/lkml/d841b97b3ad2ca8bcab07e4293375fb7c32dfce7.1736618095.git.tavianator@tavianator.com/
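
For readers checking the arithmetic, the figures above are consistent with
a regs_dump layout along the lines of the sketch below. The field names
mirror tools/perf/util/perf_regs.h, but the 64-entry cache size is inferred
from the 512-byte figure rather than taken from the patch, so treat this as
an illustration only:

#include <stdint.h>

/* Illustrative layout; the real definition lives in tools/perf/util/perf_regs.h. */
struct regs_dump {
	uint64_t abi;			/*   8 bytes */
	uint64_t mask;			/*   8 bytes */
	uint64_t *regs;			/*   8 bytes on a 64-bit build */
	uint64_t cache_regs[64];	/* 512 bytes -- the cache_regs cost quoted above */
	uint64_t cache_mask;		/*   8 bytes */
};	/* 544 bytes per copy; user_regs + intr_regs = 1088 of perf_sample's 1384 bytes */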
Adrian Hunter <adrian.hunter@intel.com> replied that the zero
initialization was necessary and couldn't simply be removed.
This patch aims to strike a middle ground: perf_sample is still zeroed,
but 79% of its size is removed by making user_regs and intr_regs
optional pointers to zalloc-ed memory. To support the allocation,
accessors are created for user_regs and intr_regs. To support correct
cleanup, perf_sample__init and perf_sample__exit functions are created
and added throughout the code base.
Signed-off-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20250113194345.1537821-1-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
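
A minimal sketch of the resulting lifecycle is below. perf_sample__init and
perf_sample__exit are named in the commit message; the perf_sample__user_regs()
accessor name, the single-argument init signature and the calloc-based lazy
allocation are assumptions made for illustration, not the exact upstream code.

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct regs_dump {	/* see the layout sketch earlier; abridged here */
	uint64_t abi, mask;
	uint64_t *regs;
	uint64_t cache_regs[64], cache_mask;
};

struct perf_sample {
	/* ... other, cheap-to-zero fields ... */
	struct regs_dump *user_regs;	/* optional: NULL until a user needs it */
	struct regs_dump *intr_regs;	/* optional: NULL until a user needs it */
};

/* The sample is still zeroed, but it is now far smaller because the two
 * regs_dump values live behind pointers instead of being embedded. */
static inline void perf_sample__init(struct perf_sample *sample)
{
	memset(sample, 0, sizeof(*sample));
}

/* Accessor: allocate the register dump zero-filled on first access.
 * perf_sample__intr_regs() would mirror this for intr_regs. */
static inline struct regs_dump *perf_sample__user_regs(struct perf_sample *sample)
{
	if (!sample->user_regs)
		sample->user_regs = calloc(1, sizeof(*sample->user_regs));
	return sample->user_regs;
}

static inline void perf_sample__exit(struct perf_sample *sample)
{
	free(sample->user_regs);
	free(sample->intr_regs);
}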
Diffstat (limited to 'tools/perf/util/synthetic-events.c')
-rw-r--r--	tools/perf/util/synthetic-events.c	| 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 6923b0d5efed..2dfc4260d36d 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1508,9 +1508,9 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
 	}
 
 	if (type & PERF_SAMPLE_REGS_USER) {
-		if (sample->user_regs.abi) {
+		if (sample->user_regs && sample->user_regs->abi) {
 			result += sizeof(u64);
-			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
+			sz = hweight64(sample->user_regs->mask) * sizeof(u64);
 			result += sz;
 		} else {
 			result += sizeof(u64);
@@ -1536,9 +1536,9 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
 		result += sizeof(u64);
 
 	if (type & PERF_SAMPLE_REGS_INTR) {
-		if (sample->intr_regs.abi) {
+		if (sample->intr_regs && sample->intr_regs->abi) {
 			result += sizeof(u64);
-			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
+			sz = hweight64(sample->intr_regs->mask) * sizeof(u64);
 			result += sz;
 		} else {
 			result += sizeof(u64);
@@ -1707,10 +1707,10 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
 	}
 
 	if (type & PERF_SAMPLE_REGS_USER) {
-		if (sample->user_regs.abi) {
-			*array++ = sample->user_regs.abi;
-			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
-			memcpy(array, sample->user_regs.regs, sz);
+		if (sample->user_regs && sample->user_regs->abi) {
+			*array++ = sample->user_regs->abi;
+			sz = hweight64(sample->user_regs->mask) * sizeof(u64);
+			memcpy(array, sample->user_regs->regs, sz);
 			array = (void *)array + sz;
 		} else {
 			*array++ = 0;
@@ -1743,10 +1743,10 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
 	}
 
 	if (type & PERF_SAMPLE_REGS_INTR) {
-		if (sample->intr_regs.abi) {
-			*array++ = sample->intr_regs.abi;
-			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
-			memcpy(array, sample->intr_regs.regs, sz);
+		if (sample->intr_regs && sample->intr_regs->abi) {
+			*array++ = sample->intr_regs->abi;
+			sz = hweight64(sample->intr_regs->mask) * sizeof(u64);
+			memcpy(array, sample->intr_regs->regs, sz);
 			array = (void *)array + sz;
 		} else {
 			*array++ = 0;
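
To round out the picture, a hypothetical call site using the types and
helpers from the sketch above might look as follows; example_caller() is a
placeholder, not code from this patch, and the pointer check mirrors the
hunks above:

/* Hypothetical caller: not code from this patch. */
void example_caller(void)
{
	struct perf_sample sample;
	struct regs_dump *regs;

	perf_sample__init(&sample);		/* cheap: no 1088 bytes of regs to clear */

	/* A parser that actually captured user registers goes through the
	 * accessor, which allocates the regs_dump on demand: */
	regs = perf_sample__user_regs(&sample);
	if (regs)
		regs->abi = 2;			/* e.g. PERF_SAMPLE_REGS_ABI_64 */

	/* Readers now test the pointer first, exactly as in the hunks above. */
	if (sample.user_regs && sample.user_regs->abi) {
		/* ... size or synthesize the register block ... */
	}

	perf_sample__exit(&sample);		/* frees any lazily allocated regs_dump */
}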