| author | 2025-05-16 11:28:38 -0700 |
| --- | --- |
| committer | 2025-05-17 10:02:27 +0200 |
| commit | ca559503b89c30bc49178d0e4a1e0b23f991fb9f (patch) |
| tree | 8fb50bc3d16ce0b35e4a6b9b3388303d9dc7bc1b /kernel |
| parent | perf/x86/intel/ds: Remove redundant assignments to sample.period (diff) |
perf/core: Add the is_event_in_freq_mode() helper to simplify the code
Add a helper that checks whether an event is in freq mode, to improve readability.
No functional changes.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250516182853.2610284-2-kan.liang@linux.intel.com
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/events/core.c | 15
1 file changed, 10 insertions(+), 5 deletions(-)
```diff
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b8461074600b..952340f1df9d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2351,6 +2351,11 @@ event_filter_match(struct perf_event *event)
 	       perf_cgroup_match(event);
 }
 
+static inline bool is_event_in_freq_mode(struct perf_event *event)
+{
+	return event->attr.freq && event->attr.sample_freq;
+}
+
 static void
 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 {
@@ -2388,7 +2393,7 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 
 	if (!is_software_event(event))
 		cpc->active_oncpu--;
-	if (event->attr.freq && event->attr.sample_freq) {
+	if (is_event_in_freq_mode(event)) {
 		ctx->nr_freq--;
 		epc->nr_freq--;
 	}
@@ -2686,7 +2691,7 @@ event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
 
 	if (!is_software_event(event))
 		cpc->active_oncpu++;
-	if (event->attr.freq && event->attr.sample_freq) {
+	if (is_event_in_freq_mode(event)) {
 		ctx->nr_freq++;
 		epc->nr_freq++;
 	}
@@ -4252,11 +4257,11 @@ static void perf_adjust_freq_unthr_events(struct list_head *event_list)
 		if (hwc->interrupts == MAX_INTERRUPTS) {
 			hwc->interrupts = 0;
 			perf_log_throttle(event, 1);
-			if (!event->attr.freq || !event->attr.sample_freq)
+			if (!is_event_in_freq_mode(event))
 				event->pmu->start(event, 0);
 		}
 
-		if (!event->attr.freq || !event->attr.sample_freq)
+		if (!is_event_in_freq_mode(event))
 			continue;
 
 		/*
@@ -12848,7 +12853,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	hwc = &event->hw;
 	hwc->sample_period = attr->sample_period;
-	if (attr->freq && attr->sample_freq)
+	if (is_event_in_freq_mode(event))
 		hwc->sample_period = 1;
 	hwc->last_period = hwc->sample_period;
 
```
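For context, an event is in freq mode when its `perf_event_attr` has the `freq` bit set together with a non-zero `sample_freq`; the kernel then auto-tunes `hw.sample_period` (seeded to 1 in `perf_event_alloc()` above) toward the requested sampling rate. Below is a minimal, hypothetical userspace sketch of opening such an event via `perf_event_open(2)` so that `is_event_in_freq_mode()` would return true for it; the cycles event and the 4000 Hz rate are illustrative choices, not part of the patch:

```c
/* Hypothetical example: open a hardware-cycles event in freq mode. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.freq = 1;           /* interpret sample_freq, not sample_period */
	attr.sample_freq = 4000; /* target samples/second; illustrative value */

	/* Measure the calling thread on any CPU; no group leader, no flags. */
	int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... mmap a ring buffer and consume samples here ... */
	close(fd);
	return 0;
}
```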