Diffstat
-rw-r--r--  tools/perf/builtin-sched.c | 1003
1 file changed, 695 insertions(+), 308 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 0e16f9d5a947..26ece6e9bfd1 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -7,6 +7,7 @@ #include "util/evlist.h" #include "util/evsel.h" #include "util/evsel_fprintf.h" +#include "util/mutex.h" #include "util/symbol.h" #include "util/thread.h" #include "util/header.h" @@ -26,6 +27,7 @@ #include "util/debug.h" #include "util/event.h" +#include "util/util.h" #include <linux/kernel.h> #include <linux/log2.h> @@ -50,6 +52,7 @@ #define COMM_LEN 20 #define SYM_LEN 129 #define MAX_PID 1024000 +#define MAX_PRIO 140 static const char *cpu_list; static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); @@ -66,7 +69,6 @@ struct task_desc { struct sched_atom **atoms; pthread_t thread; - sem_t sleep_sem; sem_t ready_for_work; sem_t work_done_sem; @@ -78,12 +80,10 @@ enum sched_event_type { SCHED_EVENT_RUN, SCHED_EVENT_SLEEP, SCHED_EVENT_WAKEUP, - SCHED_EVENT_MIGRATION, }; struct sched_atom { enum sched_event_type type; - int specific_wait; u64 timestamp; u64 duration; unsigned long nr; @@ -91,24 +91,6 @@ struct sched_atom { struct task_desc *wakee; }; -#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" - -/* task state bitmask, copied from include/linux/sched.h */ -#define TASK_RUNNING 0 -#define TASK_INTERRUPTIBLE 1 -#define TASK_UNINTERRUPTIBLE 2 -#define __TASK_STOPPED 4 -#define __TASK_TRACED 8 -/* in tsk->exit_state */ -#define EXIT_DEAD 16 -#define EXIT_ZOMBIE 32 -#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) -/* in tsk->state again */ -#define TASK_DEAD 64 -#define TASK_WAKEKILL 128 -#define TASK_WAKING 256 -#define TASK_PARKED 512 - enum thread_state { THREAD_SLEEPING = 0, THREAD_WAIT_CPU, @@ -167,12 +149,15 @@ struct trace_sched_handler { struct perf_sched_map { DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS); - int *comp_cpus; + struct perf_cpu *comp_cpus; bool comp; struct perf_thread_map *color_pids; const char *color_pids_str; struct perf_cpu_map *color_cpus; const char *color_cpus_str; + const char *task_name; + struct strlist *task_names; + bool fuzzy; struct perf_cpu_map *cpus; const char *cpus_str; }; @@ -184,16 +169,17 @@ struct perf_sched { struct task_desc **pid_to_task; struct task_desc **tasks; const struct trace_sched_handler *tp_handler; - pthread_mutex_t start_work_mutex; - pthread_mutex_t work_done_wait_mutex; + struct mutex start_work_mutex; + struct mutex work_done_wait_mutex; int profile_cpu; /* * Track the current task - that way we can know whether there's any * weird events, such as a task being switched away that is not current. 
*/ - int max_cpu; - u32 curr_pid[MAX_CPUS]; - struct thread *curr_thread[MAX_CPUS]; + struct perf_cpu max_cpu; + u32 *curr_pid; + struct thread **curr_thread; + struct thread **curr_out_thread; char next_shortname1; char next_shortname2; unsigned int replay_repeat; @@ -223,7 +209,7 @@ struct perf_sched { u64 run_avg; u64 all_runtime; u64 all_count; - u64 cpu_last_switched[MAX_CPUS]; + u64 *cpu_last_switched; struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root; struct list_head sort_list, cmp_pid; bool force; @@ -240,11 +226,16 @@ struct perf_sched { bool show_wakeups; bool show_next; bool show_migrations; + bool pre_migrations; bool show_state; + bool show_prio; u64 skipped_samples; const char *time_str; struct perf_time_interval ptime; struct perf_time_interval hist_time; + volatile bool thread_funcs_exit; + const char *prio_str; + DECLARE_BITMAP(prio_bitmap, MAX_PRIO); }; /* per thread run time data */ @@ -255,7 +246,9 @@ struct thread_runtime { u64 dt_iowait; /* time between CPU access by iowait (off cpu) */ u64 dt_preempt; /* time between CPU access by preempt (off cpu) */ u64 dt_delay; /* time between wakeup and sched-in */ + u64 dt_pre_mig; /* time between migration and wakeup */ u64 ready_to_run; /* time of wakeup */ + u64 migrated; /* time when a thread is migrated */ struct stats run_stats; u64 total_run_time; @@ -263,13 +256,16 @@ struct thread_runtime { u64 total_iowait_time; u64 total_preempt_time; u64 total_delay_time; + u64 total_pre_mig_time; - int last_state; + char last_state; char shortname[3]; bool comm_changed; u64 migrations; + + int prio; }; /* per event run time data */ @@ -427,14 +423,13 @@ static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *t wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem)); sem_init(wakee_event->wait_sem, 0, 0); - wakee_event->specific_wait = 1; event->wait_sem = wakee_event->wait_sem; sched->nr_wakeup_events++; } static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, - u64 timestamp, u64 task_state __maybe_unused) + u64 timestamp) { struct sched_atom *event = get_new_event(task, timestamp); @@ -474,7 +469,7 @@ static struct task_desc *register_pid(struct perf_sched *sched, * every task starts in sleeping state - this gets ignored * if there's no wakeup pointing to this sleep state: */ - add_sched_event_sleep(sched, task, 0, 0); + add_sched_event_sleep(sched, task, 0); sched->pid_to_task[pid] = task; sched->nr_tasks++; @@ -535,8 +530,6 @@ static void perf_sched__process_event(struct perf_sched *sched, ret = sem_post(atom->wait_sem); BUG_ON(ret); break; - case SCHED_EVENT_MIGRATION: - break; default: BUG_ON(1); } @@ -632,35 +625,34 @@ static void *thread_func(void *ctx) prctl(PR_SET_NAME, comm2); if (fd < 0) return NULL; -again: - ret = sem_post(&this_task->ready_for_work); - BUG_ON(ret); - ret = pthread_mutex_lock(&sched->start_work_mutex); - BUG_ON(ret); - ret = pthread_mutex_unlock(&sched->start_work_mutex); - BUG_ON(ret); - cpu_usage_0 = get_cpu_usage_nsec_self(fd); + while (!sched->thread_funcs_exit) { + ret = sem_post(&this_task->ready_for_work); + BUG_ON(ret); + mutex_lock(&sched->start_work_mutex); + mutex_unlock(&sched->start_work_mutex); - for (i = 0; i < this_task->nr_events; i++) { - this_task->curr_event = i; - perf_sched__process_event(sched, this_task->atoms[i]); - } + cpu_usage_0 = get_cpu_usage_nsec_self(fd); - cpu_usage_1 = get_cpu_usage_nsec_self(fd); - this_task->cpu_usage = cpu_usage_1 - cpu_usage_0; - ret = 
sem_post(&this_task->work_done_sem); - BUG_ON(ret); + for (i = 0; i < this_task->nr_events; i++) { + this_task->curr_event = i; + perf_sched__process_event(sched, this_task->atoms[i]); + } - ret = pthread_mutex_lock(&sched->work_done_wait_mutex); - BUG_ON(ret); - ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); - BUG_ON(ret); + cpu_usage_1 = get_cpu_usage_nsec_self(fd); + this_task->cpu_usage = cpu_usage_1 - cpu_usage_0; + ret = sem_post(&this_task->work_done_sem); + BUG_ON(ret); - goto again; + mutex_lock(&sched->work_done_wait_mutex); + mutex_unlock(&sched->work_done_wait_mutex); + } + return NULL; } static void create_tasks(struct perf_sched *sched) + EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex) + EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex) { struct task_desc *task; pthread_attr_t attr; @@ -670,19 +662,16 @@ static void create_tasks(struct perf_sched *sched) err = pthread_attr_init(&attr); BUG_ON(err); err = pthread_attr_setstacksize(&attr, - (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); - BUG_ON(err); - err = pthread_mutex_lock(&sched->start_work_mutex); - BUG_ON(err); - err = pthread_mutex_lock(&sched->work_done_wait_mutex); + (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN)); BUG_ON(err); + mutex_lock(&sched->start_work_mutex); + mutex_lock(&sched->work_done_wait_mutex); for (i = 0; i < sched->nr_tasks; i++) { struct sched_thread_parms *parms = malloc(sizeof(*parms)); BUG_ON(parms == NULL); parms->task = task = sched->tasks[i]; parms->sched = sched; parms->fd = self_open_counters(sched, i); - sem_init(&task->sleep_sem, 0, 0); sem_init(&task->ready_for_work, 0, 0); sem_init(&task->work_done_sem, 0, 0); task->curr_event = 0; @@ -691,7 +680,29 @@ static void create_tasks(struct perf_sched *sched) } } +static void destroy_tasks(struct perf_sched *sched) + UNLOCK_FUNCTION(sched->start_work_mutex) + UNLOCK_FUNCTION(sched->work_done_wait_mutex) +{ + struct task_desc *task; + unsigned long i; + int err; + + mutex_unlock(&sched->start_work_mutex); + mutex_unlock(&sched->work_done_wait_mutex); + /* Get rid of threads so they won't be upset by mutex destruction */ + for (i = 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; + err = pthread_join(task->thread, NULL); + BUG_ON(err); + sem_destroy(&task->ready_for_work); + sem_destroy(&task->work_done_sem); + } +} + static void wait_for_tasks(struct perf_sched *sched) + EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex) + EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex) { u64 cpu_usage_0, cpu_usage_1; struct task_desc *task; @@ -699,7 +710,7 @@ static void wait_for_tasks(struct perf_sched *sched) sched->start_time = get_nsecs(); sched->cpu_usage = 0; - pthread_mutex_unlock(&sched->work_done_wait_mutex); + mutex_unlock(&sched->work_done_wait_mutex); for (i = 0; i < sched->nr_tasks; i++) { task = sched->tasks[i]; @@ -707,12 +718,11 @@ static void wait_for_tasks(struct perf_sched *sched) BUG_ON(ret); sem_init(&task->ready_for_work, 0, 0); } - ret = pthread_mutex_lock(&sched->work_done_wait_mutex); - BUG_ON(ret); + mutex_lock(&sched->work_done_wait_mutex); cpu_usage_0 = get_cpu_usage_nsec_parent(); - pthread_mutex_unlock(&sched->start_work_mutex); + mutex_unlock(&sched->start_work_mutex); for (i = 0; i < sched->nr_tasks; i++) { task = sched->tasks[i]; @@ -734,17 +744,17 @@ static void wait_for_tasks(struct perf_sched *sched) sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) + sched->parent_cpu_usage)/sched->replay_repeat; - ret = pthread_mutex_lock(&sched->start_work_mutex); -
BUG_ON(ret); + mutex_lock(&sched->start_work_mutex); for (i = 0; i < sched->nr_tasks; i++) { task = sched->tasks[i]; - sem_init(&task->sleep_sem, 0, 0); task->curr_event = 0; } } static void run_one_test(struct perf_sched *sched) + EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex) + EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex) { u64 T0, T1, delta, avg_delta, fluct; @@ -838,7 +848,6 @@ static int replay_switch_event(struct perf_sched *sched, *next_comm = evsel__strval(evsel, sample, "next_comm"); const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"), next_pid = evsel__intval(evsel, sample, "next_pid"); - const u64 prev_state = evsel__intval(evsel, sample, "prev_state"); struct task_desc *prev, __maybe_unused *next; u64 timestamp0, timestamp = sample->time; int cpu = sample->cpu; @@ -870,7 +879,7 @@ static int replay_switch_event(struct perf_sched *sched, sched->cpu_last_switched[cpu] = timestamp; add_sched_event_run(sched, prev, timestamp, delta); - add_sched_event_sleep(sched, prev, timestamp, prev_state); + add_sched_event_sleep(sched, prev, timestamp); return 0; } @@ -894,12 +903,12 @@ static int replay_fork_event(struct perf_sched *sched, if (verbose > 0) { printf("fork event\n"); - printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid); - printf("... child: %s/%d\n", thread__comm_str(child), child->tid); + printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent)); + printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child)); } - register_pid(sched, parent->tid, thread__comm_str(parent)); - register_pid(sched, child->tid, thread__comm_str(child)); + register_pid(sched, thread__tid(parent), thread__comm_str(parent)); + register_pid(sched, thread__tid(child), thread__comm_str(child)); out_put: thread__put(child); thread__put(parent); @@ -912,6 +921,11 @@ struct sort_dimension { struct list_head list; }; +static inline void init_prio(struct thread_runtime *r) +{ + r->prio = -1; +} + /* * handle runtime stats saved per thread */ @@ -924,6 +938,7 @@ static struct thread_runtime *thread__init_runtime(struct thread *thread) return NULL; init_stats(&r->run_stats); + init_prio(r); thread__set_priv(thread, r); return r; @@ -1028,13 +1043,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) return 0; } -static char sched_out_state(u64 prev_state) -{ - const char *str = TASK_STATE_TO_CHAR_STR; - - return str[prev_state]; -} - static int add_sched_out_event(struct work_atoms *atoms, char run_state, @@ -1110,7 +1118,7 @@ static int latency_switch_event(struct perf_sched *sched, { const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"), next_pid = evsel__intval(evsel, sample, "next_pid"); - const u64 prev_state = evsel__intval(evsel, sample, "prev_state"); + const char prev_state = evsel__taskstate(evsel, sample, "prev_state"); struct work_atoms *out_events, *in_events; struct thread *sched_out, *sched_in; u64 timestamp0, timestamp = sample->time; @@ -1146,7 +1154,7 @@ static int latency_switch_event(struct perf_sched *sched, goto out_put; } } - if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp)) + if (add_sched_out_event(out_events, prev_state, timestamp)) return -1; in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); @@ -1294,7 +1302,7 @@ static int latency_migrate_task_event(struct perf_sched *sched, if (!atoms) { if (thread_atoms_insert(sched, migrant)) goto out_put; - register_pid(sched, migrant->tid, thread__comm_str(migrant)); + 
register_pid(sched, thread__tid(migrant), thread__comm_str(migrant)); atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); if (!atoms) { pr_err("migration-event: Internal tree error"); @@ -1337,10 +1345,13 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_ sched->all_runtime += work_list->total_runtime; sched->all_count += work_list->nb_atoms; - if (work_list->num_merged > 1) - ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged); - else - ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid); + if (work_list->num_merged > 1) { + ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), + work_list->num_merged); + } else { + ret = printf(" %s:%d ", thread__comm_str(work_list->thread), + thread__tid(work_list->thread)); + } for (i = 0; i < 24 - ret; i++) printf(" "); @@ -1358,13 +1369,17 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_ static int pid_cmp(struct work_atoms *l, struct work_atoms *r) { - if (l->thread == r->thread) + pid_t l_tid, r_tid; + + if (RC_CHK_EQUAL(l->thread, r->thread)) return 0; - if (l->thread->tid < r->thread->tid) + l_tid = thread__tid(l->thread); + r_tid = thread__tid(r->thread); + if (l_tid < r_tid) return -1; - if (l->thread->tid > r->thread->tid) + if (l_tid > r_tid) return 1; - return (int)(l->thread - r->thread); + return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread)); } static int avg_cmp(struct work_atoms *l, struct work_atoms *r) @@ -1481,7 +1496,7 @@ again: } } -static int process_sched_wakeup_event(struct perf_tool *tool, +static int process_sched_wakeup_event(const struct perf_tool *tool, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) @@ -1494,6 +1509,14 @@ static int process_sched_wakeup_event(struct perf_tool *tool, return 0; } +static int process_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused, + struct evsel *evsel __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) +{ + return 0; +} + union map_priv { void *ptr; bool color; @@ -1526,37 +1549,108 @@ map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid return thread; } +static bool sched_match_task(struct perf_sched *sched, const char *comm_str) +{ + bool fuzzy_match = sched->map.fuzzy; + struct strlist *task_names = sched->map.task_names; + struct str_node *node; + + strlist__for_each_entry(node, task_names) { + bool match_found = fuzzy_match ? !!strstr(comm_str, node->s) : + !strcmp(comm_str, node->s); + if (match_found) + return true; + } + + return false; +} + +static void print_sched_map(struct perf_sched *sched, struct perf_cpu this_cpu, int cpus_nr, + const char *color, bool sched_out) +{ + for (int i = 0; i < cpus_nr; i++) { + struct perf_cpu cpu = { + .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i, + }; + struct thread *curr_thread = sched->curr_thread[cpu.cpu]; + struct thread *curr_out_thread = sched->curr_out_thread[cpu.cpu]; + struct thread_runtime *curr_tr; + const char *pid_color = color; + const char *cpu_color = color; + char symbol = ' '; + struct thread *thread_to_check = sched_out ? 
curr_out_thread : curr_thread; + + if (thread_to_check && thread__has_color(thread_to_check)) + pid_color = COLOR_PIDS; + + if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu)) + cpu_color = COLOR_CPUS; + + if (cpu.cpu == this_cpu.cpu) + symbol = '*'; + + color_fprintf(stdout, cpu.cpu != this_cpu.cpu ? color : cpu_color, "%c", symbol); + + thread_to_check = sched_out ? sched->curr_out_thread[cpu.cpu] : + sched->curr_thread[cpu.cpu]; + + if (thread_to_check) { + curr_tr = thread__get_runtime(thread_to_check); + if (curr_tr == NULL) + return; + + if (sched_out) { + if (cpu.cpu == this_cpu.cpu) + color_fprintf(stdout, color, "- "); + else { + curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]); + if (curr_tr != NULL) + color_fprintf(stdout, pid_color, "%2s ", + curr_tr->shortname); + } + } else + color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname); + } else + color_fprintf(stdout, color, " "); + } +} + static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) { const u32 next_pid = evsel__intval(evsel, sample, "next_pid"); - struct thread *sched_in; + const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"); + struct thread *sched_in, *sched_out; struct thread_runtime *tr; int new_shortname; u64 timestamp0, timestamp = sample->time; s64 delta; - int i, this_cpu = sample->cpu; + struct perf_cpu this_cpu = { + .cpu = sample->cpu, + }; int cpus_nr; + int proceed; bool new_cpu = false; const char *color = PERF_COLOR_NORMAL; char stimestamp[32]; + const char *str; - BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); + BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0); - if (this_cpu > sched->max_cpu) + if (this_cpu.cpu > sched->max_cpu.cpu) sched->max_cpu = this_cpu; if (sched->map.comp) { cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); - if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) { + if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) { sched->map.comp_cpus[cpus_nr++] = this_cpu; new_cpu = true; } } else - cpus_nr = sched->max_cpu; + cpus_nr = sched->max_cpu.cpu; - timestamp0 = sched->cpu_last_switched[this_cpu]; - sched->cpu_last_switched[this_cpu] = timestamp; + timestamp0 = sched->cpu_last_switched[this_cpu.cpu]; + sched->cpu_last_switched[this_cpu.cpu] = timestamp; if (timestamp0) delta = timestamp - timestamp0; else @@ -1568,7 +1662,8 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, } sched_in = map__findnew_thread(sched, machine, -1, next_pid); - if (sched_in == NULL) + sched_out = map__findnew_thread(sched, machine, -1, prev_pid); + if (sched_in == NULL || sched_out == NULL) return -1; tr = thread__get_runtime(sched_in); @@ -1577,10 +1672,10 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, return -1; } - sched->curr_thread[this_cpu] = thread__get(sched_in); - - printf(" "); + sched->curr_thread[this_cpu.cpu] = thread__get(sched_in); + sched->curr_out_thread[this_cpu.cpu] = thread__get(sched_out); + str = thread__comm_str(sched_in); new_shortname = 0; if (!tr->shortname[0]) { if (!strcmp(thread__comm_str(sched_in), "swapper")) { @@ -1590,7 +1685,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, */ tr->shortname[0] = '.'; tr->shortname[1] = ' '; - } else { + } else if (!sched->map.task_name || sched_match_task(sched, str)) { tr->shortname[0] = sched->next_shortname1; tr->shortname[1] = sched->next_shortname2; @@ -1603,70 +1698,86 @@ static int 
map_switch_event(struct perf_sched *sched, struct evsel *evsel, else sched->next_shortname2 = '0'; } + } else { + tr->shortname[0] = '-'; + tr->shortname[1] = ' '; } new_shortname = 1; } - for (i = 0; i < cpus_nr; i++) { - int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i; - struct thread *curr_thread = sched->curr_thread[cpu]; - struct thread_runtime *curr_tr; - const char *pid_color = color; - const char *cpu_color = color; - - if (curr_thread && thread__has_color(curr_thread)) - pid_color = COLOR_PIDS; - - if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu)) - continue; - - if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu)) - cpu_color = COLOR_CPUS; + if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu)) + goto out; - if (cpu != this_cpu) - color_fprintf(stdout, color, " "); + proceed = 0; + str = thread__comm_str(sched_in); + /* + * Check which of sched_in and sched_out matches the passed --task-name + * arguments and call the corresponding print_sched_map. + */ + if (sched->map.task_name && !sched_match_task(sched, str)) { + if (!sched_match_task(sched, thread__comm_str(sched_out))) + goto out; else - color_fprintf(stdout, cpu_color, "*"); + goto sched_out; - if (sched->curr_thread[cpu]) { - curr_tr = thread__get_runtime(sched->curr_thread[cpu]); - if (curr_tr == NULL) { - thread__put(sched_in); - return -1; - } - color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname); - } else - color_fprintf(stdout, color, " "); + } else { + str = thread__comm_str(sched_out); + if (!(sched->map.task_name && !sched_match_task(sched, str))) + proceed = 1; } - if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu)) - goto out; + printf(" "); + + print_sched_map(sched, this_cpu, cpus_nr, color, false); timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); color_fprintf(stdout, color, " %12s secs ", stimestamp); - if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) { + if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) { const char *pid_color = color; if (thread__has_color(sched_in)) pid_color = COLOR_PIDS; color_fprintf(stdout, pid_color, "%s => %s:%d", - tr->shortname, thread__comm_str(sched_in), sched_in->tid); + tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in)); tr->comm_changed = false; } if (sched->map.comp && new_cpu) - color_fprintf(stdout, color, " (CPU %d)", this_cpu); + color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu); + + if (proceed != 1) { + color_fprintf(stdout, color, "\n"); + goto out; + } + +sched_out: + if (sched->map.task_name) { + tr = thread__get_runtime(sched->curr_out_thread[this_cpu.cpu]); + if (strcmp(tr->shortname, "") == 0) + goto out; + + if (proceed == 1) + color_fprintf(stdout, color, "\n"); + + printf(" "); + print_sched_map(sched, this_cpu, cpus_nr, color, true); + timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); + color_fprintf(stdout, color, " %12s secs ", stimestamp); + } -out: color_fprintf(stdout, color, "\n"); +out: + if (sched->map.task_name) + thread__put(sched_out); + thread__put(sched_in); return 0; } -static int process_sched_switch_event(struct perf_tool *tool, +static int process_sched_switch_event(const struct perf_tool *tool, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) @@ -1692,7 +1803,7 @@ static int process_sched_switch_event(struct perf_tool *tool, return err; } -static int process_sched_runtime_event(struct perf_tool *tool, +static int 
process_sched_runtime_event(const struct perf_tool *tool, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) @@ -1705,14 +1816,14 @@ static int process_sched_runtime_event(struct perf_tool *tool, return 0; } -static int perf_sched__process_fork_event(struct perf_tool *tool, +static int perf_sched__process_fork_event(const struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct machine *machine) { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - /* run the fork event through the perf machineruy */ + /* run the fork event through the perf machinery */ perf_event__process_fork(tool, event, sample, machine); /* and then run additional processing needed for this command */ @@ -1722,7 +1833,7 @@ static int perf_sched__process_fork_event(struct perf_tool *tool, return 0; } -static int process_sched_migrate_task_event(struct perf_tool *tool, +static int process_sched_migrate_task_event(const struct perf_tool *tool, struct evsel *evsel, struct perf_sample *sample, struct machine *machine) @@ -1735,12 +1846,12 @@ static int process_sched_migrate_task_event(struct perf_tool *tool, return 0; } -typedef int (*tracepoint_handler)(struct perf_tool *tool, +typedef int (*tracepoint_handler)(const struct perf_tool *tool, struct evsel *evsel, struct perf_sample *sample, struct machine *machine); -static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, +static int perf_sched__process_tracepoint_sample(const struct perf_tool *tool __maybe_unused, union perf_event *event __maybe_unused, struct perf_sample *sample, struct evsel *evsel, @@ -1756,7 +1867,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_ return err; } -static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused, +static int perf_sched__process_comm(const struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine) @@ -1789,10 +1900,11 @@ static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused, static int perf_sched__read_events(struct perf_sched *sched) { - const struct evsel_str_handler handlers[] = { + struct evsel_str_handler handlers[] = { { "sched:sched_switch", process_sched_switch_event, }, { "sched:sched_stat_runtime", process_sched_runtime_event, }, { "sched:sched_wakeup", process_sched_wakeup_event, }, + { "sched:sched_waking", process_sched_wakeup_event, }, { "sched:sched_wakeup_new", process_sched_wakeup_event, }, { "sched:sched_migrate_task", process_sched_migrate_task_event, }, }; @@ -1804,7 +1916,7 @@ static int perf_sched__read_events(struct perf_sched *sched) }; int rc = -1; - session = perf_session__new(&data, false, &sched->tool); + session = perf_session__new(&data, &sched->tool); if (IS_ERR(session)) { pr_debug("Error creating perf session"); return PTR_ERR(session); @@ -1812,6 +1924,10 @@ static int perf_sched__read_events(struct perf_sched *sched) symbol__init(&session->header.env); + /* prefer sched_waking if it is captured */ + if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking")) + handlers[2].handler = process_sched_wakeup_ignore; + if (perf_session__set_tracepoints_handlers(session, handlers)) goto out_delete; @@ -1908,8 +2024,8 @@ static char *timehist_get_commstr(struct thread *thread) { static char str[32]; const char *comm = thread__comm_str(thread); - pid_t tid = thread->tid; - pid_t pid = thread->pid_; + pid_t tid = thread__tid(thread); + pid_t pid = 
thread__pid(thread); int n; if (pid == 0) @@ -1927,9 +2043,27 @@ static char *timehist_get_commstr(struct thread *thread) return str; } +/* prio field format: xxx or xxx->yyy */ +#define MAX_PRIO_STR_LEN 8 +static char *timehist_get_priostr(struct evsel *evsel, + struct thread *thread, + struct perf_sample *sample) +{ + static char prio_str[16]; + int prev_prio = (int)evsel__intval(evsel, sample, "prev_prio"); + struct thread_runtime *tr = thread__priv(thread); + + if (tr->prio != prev_prio && tr->prio != -1) + scnprintf(prio_str, sizeof(prio_str), "%d->%d", tr->prio, prev_prio); + else + scnprintf(prio_str, sizeof(prio_str), "%d", prev_prio); + + return prio_str; +} + static void timehist_header(struct perf_sched *sched) { - u32 ncpus = sched->max_cpu + 1; + u32 ncpus = sched->max_cpu.cpu + 1; u32 i, j; printf("%15s %6s ", "time", "cpu"); @@ -1944,8 +2078,15 @@ static void timehist_header(struct perf_sched *sched) printf(" "); } - printf(" %-*s %9s %9s %9s", comm_width, - "task name", "wait time", "sch delay", "run time"); + printf(" %-*s", comm_width, "task name"); + + if (sched->show_prio) + printf(" %-*s", MAX_PRIO_STR_LEN, "prio"); + + printf(" %9s %9s %9s", "wait time", "sch delay", "run time"); + + if (sched->pre_migrations) + printf(" %9s", "pre-mig time"); if (sched->show_state) printf(" %s", "state"); @@ -1960,11 +2101,15 @@ static void timehist_header(struct perf_sched *sched) if (sched->show_cpu_visual) printf(" %*s ", ncpus, ""); - printf(" %-*s %9s %9s %9s", comm_width, - "[tid/pid]", "(msec)", "(msec)", "(msec)"); + printf(" %-*s", comm_width, "[tid/pid]"); - if (sched->show_state) - printf(" %5s", ""); + if (sched->show_prio) + printf(" %-*s", MAX_PRIO_STR_LEN, ""); + + printf(" %9s %9s %9s", "(msec)", "(msec)", "(msec)"); + + if (sched->pre_migrations) + printf(" %9s", "(msec)"); printf("\n"); @@ -1976,26 +2121,20 @@ static void timehist_header(struct perf_sched *sched) if (sched->show_cpu_visual) printf(" %.*s ", ncpus, graph_dotted_line); - printf(" %.*s %.9s %.9s %.9s", comm_width, - graph_dotted_line, graph_dotted_line, graph_dotted_line, - graph_dotted_line); + printf(" %.*s", comm_width, graph_dotted_line); - if (sched->show_state) - printf(" %.5s", graph_dotted_line); + if (sched->show_prio) + printf(" %.*s", MAX_PRIO_STR_LEN, graph_dotted_line); - printf("\n"); -} + printf(" %.9s %.9s %.9s", graph_dotted_line, graph_dotted_line, graph_dotted_line); -static char task_state_char(struct thread *thread, int state) -{ - static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; - unsigned bit = state ? ffs(state) : 0; + if (sched->pre_migrations) + printf(" %.9s", graph_dotted_line); - /* 'I' for idle */ - if (thread->tid == 0) - return 'I'; + if (sched->show_state) + printf(" %.5s", graph_dotted_line); - return bit < sizeof(state_to_char) - 1 ? 
state_to_char[bit] : '?'; + printf("\n"); } static void timehist_print_sample(struct perf_sched *sched, @@ -2003,12 +2142,12 @@ static void timehist_print_sample(struct perf_sched *sched, struct perf_sample *sample, struct addr_location *al, struct thread *thread, - u64 t, int state) + u64 t, const char state) { struct thread_runtime *tr = thread__priv(thread); const char *next_comm = evsel__strval(evsel, sample, "next_comm"); const u32 next_pid = evsel__intval(evsel, sample, "next_pid"); - u32 max_cpus = sched->max_cpu + 1; + u32 max_cpus = sched->max_cpu.cpu + 1; char tstr[64]; char nstr[30]; u64 wait_time; @@ -2027,7 +2166,7 @@ static void timehist_print_sample(struct perf_sched *sched, for (i = 0; i < max_cpus; ++i) { /* flag idle times with 'i'; others are sched events */ if (i == sample->cpu) - c = (thread->tid == 0) ? 'i' : 's'; + c = (thread__tid(thread) == 0) ? 'i' : 's'; else c = ' '; printf("%c", c); @@ -2037,14 +2176,19 @@ static void timehist_print_sample(struct perf_sched *sched, printf(" %-*s ", comm_width, timehist_get_commstr(thread)); + if (sched->show_prio) + printf(" %-*s ", MAX_PRIO_STR_LEN, timehist_get_priostr(evsel, thread, sample)); + wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt; print_sched_time(wait_time, 6); print_sched_time(tr->dt_delay, 6); print_sched_time(tr->dt_run, 6); + if (sched->pre_migrations) + print_sched_time(tr->dt_pre_mig, 6); if (sched->show_state) - printf(" %5c ", task_state_char(thread, state)); + printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state); if (sched->show_next) { snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid); @@ -2054,7 +2198,7 @@ static void timehist_print_sample(struct perf_sched *sched, if (sched->show_wakeups && !sched->show_next) printf(" %-*s", comm_width, ""); - if (thread->tid == 0) + if (thread__tid(thread) == 0) goto out; if (sched->show_callchain) @@ -2064,7 +2208,7 @@ static void timehist_print_sample(struct perf_sched *sched, EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE | EVSEL__PRINT_CALLCHAIN_ARROW | EVSEL__PRINT_SKIP_IGNORED, - &callchain_cursor, symbol_conf.bt_stop_list, stdout); + get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout); out: printf("\n"); @@ -2079,18 +2223,21 @@ out: * last_time = time of last sched change event for current task * (i.e, time process was last scheduled out) * ready_to_run = time of wakeup for current task + * migrated = time of task migration to another CPU * - * -----|------------|------------|------------|------ - * last ready tprev t + * -----|-------------|-------------|-------------|-------------|----- + * last ready migrated tprev t * time to run * - * |-------- dt_wait --------| - * |- dt_delay -|-- dt_run --| + * |---------------- dt_wait ----------------| + * |--------- dt_delay ---------|-- dt_run --| + * |- dt_pre_mig -| * - * dt_run = run time of current task - * dt_wait = time between last schedule out event for task and tprev - * represents time spent off the cpu - * dt_delay = time between wakeup and schedule-in of task + * dt_run = run time of current task + * dt_wait = time between last schedule out event for task and tprev + * represents time spent off the cpu + * dt_delay = time between wakeup and schedule-in of task + * dt_pre_mig = time between wakeup and migration to another CPU */ static void timehist_update_runtime_stats(struct thread_runtime *r, @@ -2101,6 +2248,7 @@ static void timehist_update_runtime_stats(struct thread_runtime *r, r->dt_iowait = 0; r->dt_preempt = 0; r->dt_run = 0; + r->dt_pre_mig = 0; if (tprev) { 
r->dt_run = t - tprev; @@ -2109,6 +2257,9 @@ static void timehist_update_runtime_stats(struct thread_runtime *r, pr_debug("time travel: wakeup time for task > previous sched_switch event\n"); else r->dt_delay = tprev - r->ready_to_run; + + if ((r->migrated > r->ready_to_run) && (r->migrated < tprev)) + r->dt_pre_mig = r->migrated - r->ready_to_run; } if (r->last_time > tprev) @@ -2116,9 +2267,9 @@ static void timehist_update_runtime_stats(struct thread_runtime *r, else if (r->last_time) { u64 dt_wait = tprev - r->last_time; - if (r->last_state == TASK_RUNNING) + if (r->last_state == 'R') r->dt_preempt = dt_wait; - else if (r->last_state == TASK_UNINTERRUPTIBLE) + else if (r->last_state == 'D') r->dt_iowait = dt_wait; else r->dt_sleep = dt_wait; @@ -2132,13 +2283,14 @@ static void timehist_update_runtime_stats(struct thread_runtime *r, r->total_sleep_time += r->dt_sleep; r->total_iowait_time += r->dt_iowait; r->total_preempt_time += r->dt_preempt; + r->total_pre_mig_time += r->dt_pre_mig; } static bool is_idle_sample(struct perf_sample *sample, struct evsel *evsel) { /* pid 0 == swapper == idle task */ - if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0) + if (evsel__name_is(evsel, "sched:sched_switch")) return evsel__intval(evsel, sample, "prev_pid") == 0; return sample->pid == 0; @@ -2149,7 +2301,7 @@ static void save_task_callchain(struct perf_sched *sched, struct evsel *evsel, struct machine *machine) { - struct callchain_cursor *cursor = &callchain_cursor; + struct callchain_cursor *cursor; struct thread *thread; /* want main thread for process - has maps */ @@ -2162,6 +2314,8 @@ static void save_task_callchain(struct perf_sched *sched, if (!sched->show_callchain || sample->callchain == NULL) return; + cursor = get_tls_callchain_cursor(); + if (thread__resolve_callchain(thread, cursor, evsel, sample, NULL, NULL, sched->max_stack + 2) != 0) { if (verbose > 0) @@ -2202,6 +2356,7 @@ static int init_idle_thread(struct thread *thread) if (itr == NULL) return -ENOMEM; + init_prio(&itr->tr); init_stats(&itr->tr.run_stats); callchain_init(&itr->callchain); callchain_cursor_reset(&itr->cursor); @@ -2291,10 +2446,16 @@ static void save_idle_callchain(struct perf_sched *sched, struct idle_thread_runtime *itr, struct perf_sample *sample) { + struct callchain_cursor *cursor; + if (!sched->show_callchain || sample->callchain == NULL) return; - callchain_cursor__copy(&itr->cursor, &callchain_cursor); + cursor = get_tls_callchain_cursor(); + if (cursor == NULL) + return; + + callchain_cursor__copy(&itr->cursor, cursor); } static struct thread *timehist_get_thread(struct perf_sched *sched, @@ -2350,14 +2511,35 @@ static bool timehist_skip_sample(struct perf_sched *sched, struct perf_sample *sample) { bool rc = false; + int prio = -1; + struct thread_runtime *tr = NULL; if (thread__is_filtered(thread)) { rc = true; sched->skipped_samples++; } + if (sched->prio_str) { + /* + * Because priority may be changed during task execution, + * first read priority from prev sched_in event for current task. + * If prev sched_in event is not saved, then read priority from + * current task sched_out event. 
+ */ + tr = thread__get_runtime(thread); + if (tr && tr->prio != -1) + prio = tr->prio; + else if (evsel__name_is(evsel, "sched:sched_switch")) + prio = evsel__intval(evsel, sample, "prev_prio"); + + if (prio != -1 && !test_bit(prio, sched->prio_bitmap)) { + rc = true; + sched->skipped_samples++; + } + } + if (sched->idle_hist) { - if (strcmp(evsel__name(evsel), "sched:sched_switch")) + if (!evsel__name_is(evsel, "sched:sched_switch")) rc = true; else if (evsel__intval(evsel, sample, "prev_pid") != 0 && evsel__intval(evsel, sample, "next_pid") != 0) @@ -2389,7 +2571,7 @@ static void timehist_print_wakeup_event(struct perf_sched *sched, timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); printf("%15s [%04d] ", tstr, sample->cpu); if (sched->show_cpu_visual) - printf(" %*s ", sched->max_cpu + 1, ""); + printf(" %*s ", sched->max_cpu.cpu + 1, ""); printf(" %-*s ", comm_width, timehist_get_commstr(thread)); @@ -2401,7 +2583,7 @@ static void timehist_print_wakeup_event(struct perf_sched *sched, printf("\n"); } -static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused, +static int timehist_sched_wakeup_ignore(const struct perf_tool *tool __maybe_unused, union perf_event *event __maybe_unused, struct evsel *evsel __maybe_unused, struct perf_sample *sample __maybe_unused, @@ -2410,7 +2592,7 @@ static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused, return 0; } -static int timehist_sched_wakeup_event(struct perf_tool *tool, +static int timehist_sched_wakeup_event(const struct perf_tool *tool, union perf_event *event __maybe_unused, struct evsel *evsel, struct perf_sample *sample, @@ -2449,13 +2631,13 @@ static void timehist_print_migration_event(struct perf_sched *sched, { struct thread *thread; char tstr[64]; - u32 max_cpus = sched->max_cpu + 1; + u32 max_cpus; u32 ocpu, dcpu; if (sched->summary_only) return; - max_cpus = sched->max_cpu + 1; + max_cpus = sched->max_cpu.cpu + 1; ocpu = evsel__intval(evsel, sample, "orig_cpu"); dcpu = evsel__intval(evsel, sample, "dest_cpu"); @@ -2494,7 +2676,7 @@ static void timehist_print_migration_event(struct perf_sched *sched, printf("\n"); } -static int timehist_migrate_task_event(struct perf_tool *tool, +static int timehist_migrate_task_event(const struct perf_tool *tool, union perf_event *event __maybe_unused, struct evsel *evsel, struct perf_sample *sample, @@ -2515,14 +2697,42 @@ static int timehist_migrate_task_event(struct perf_tool *tool, return -1; tr->migrations++; + tr->migrated = sample->time; /* show migrations if requested */ - timehist_print_migration_event(sched, evsel, sample, machine, thread); + if (sched->show_migrations) { + timehist_print_migration_event(sched, evsel, sample, + machine, thread); + } return 0; } -static int timehist_sched_change_event(struct perf_tool *tool, +static void timehist_update_task_prio(struct evsel *evsel, + struct perf_sample *sample, + struct machine *machine) +{ + struct thread *thread; + struct thread_runtime *tr = NULL; + const u32 next_pid = evsel__intval(evsel, sample, "next_pid"); + const u32 next_prio = evsel__intval(evsel, sample, "next_prio"); + + if (next_pid == 0) + thread = get_idle_thread(sample->cpu); + else + thread = machine__findnew_thread(machine, -1, next_pid); + + if (thread == NULL) + return; + + tr = thread__get_runtime(thread); + if (tr == NULL) + return; + + tr->prio = next_prio; +} + +static int timehist_sched_change_event(const struct perf_tool *tool, union perf_event *event, struct evsel *evsel, struct perf_sample *sample, @@ -2535,8 
+2745,9 @@ static int timehist_sched_change_event(struct perf_tool *tool, struct thread_runtime *tr = NULL; u64 tprev, t = sample->time; int rc = 0; - int state = evsel__intval(evsel, sample, "prev_state"); + const char state = evsel__taskstate(evsel, sample, "prev_state"); + addr_location__init(&al); if (machine__resolve(machine, &al, sample) < 0) { pr_err("problem processing %d event. skipping it\n", event->header.type); @@ -2544,6 +2755,9 @@ static int timehist_sched_change_event(struct perf_tool *tool, goto out; } + if (sched->show_prio || sched->prio_str) + timehist_update_task_prio(evsel, sample, machine); + thread = timehist_get_thread(sched, sample, machine, evsel); if (thread == NULL) { rc = -1; @@ -2577,16 +2791,19 @@ static int timehist_sched_change_event(struct perf_tool *tool, * - previous sched event is out of window - we are done * - sample time is beyond window user cares about - reset it * to close out stats for time window interest + * - If tprev is 0, that is, sched_in event for current task is + * not recorded, cannot determine whether sched_in event is + * within time window interest - ignore it */ if (ptime->end) { - if (tprev > ptime->end) + if (!tprev || tprev > ptime->end) goto out; if (t > ptime->end) t = ptime->end; } - if (!sched->idle_hist || thread->tid == 0) { + if (!sched->idle_hist || thread__tid(thread) == 0) { if (!cpu_list || test_bit(sample->cpu, cpu_bitmap)) timehist_update_runtime_stats(tr, t, tprev); @@ -2594,8 +2811,6 @@ static int timehist_sched_change_event(struct perf_tool *tool, struct idle_thread_runtime *itr = (void *)tr; struct thread_runtime *last_tr; - BUG_ON(thread->tid != 0); - if (itr->last_thread == NULL) goto out; @@ -2621,10 +2836,10 @@ static int timehist_sched_change_event(struct perf_tool *tool, itr->last_thread = NULL; } - } - if (!sched->summary_only) - timehist_print_sample(sched, evsel, sample, &al, thread, t, state); + if (!sched->summary_only) + timehist_print_sample(sched, evsel, sample, &al, thread, t, state); + } out: if (sched->hist_time.start == 0 && t >= ptime->start) @@ -2639,16 +2854,22 @@ out: /* last state is used to determine where to account wait time */ tr->last_state = state; - /* sched out event for task so reset ready to run time */ - tr->ready_to_run = 0; + /* sched out event for task so reset ready to run time and migrated time */ + if (state == 'R') + tr->ready_to_run = t; + else + tr->ready_to_run = 0; + + tr->migrated = 0; } evsel__save_time(evsel, sample->time, sample->cpu); + addr_location__exit(&al); return rc; } -static int timehist_sched_switch_event(struct perf_tool *tool, +static int timehist_sched_switch_event(const struct perf_tool *tool, union perf_event *event, struct evsel *evsel, struct perf_sample *sample, @@ -2657,7 +2878,7 @@ static int timehist_sched_switch_event(struct perf_tool *tool, return timehist_sched_change_event(tool, event, evsel, sample, machine); } -static int process_lost(struct perf_tool *tool __maybe_unused, +static int process_lost(const struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct machine *machine __maybe_unused) @@ -2679,7 +2900,7 @@ static void print_thread_runtime(struct thread *t, float stddev; printf("%*s %5d %9" PRIu64 " ", - comm_width, timehist_get_commstr(t), t->ppid, + comm_width, timehist_get_commstr(t), thread__ppid(t), (u64) r->run_stats.n); print_sched_time(r->total_run_time, 8); @@ -2699,7 +2920,7 @@ static void print_thread_waittime(struct thread *t, struct thread_runtime *r) { printf("%*s %5d %9" 
PRIu64 " ", - comm_width, timehist_get_commstr(t), t->ppid, + comm_width, timehist_get_commstr(t), thread__ppid(t), (u64) r->run_stats.n); print_sched_time(r->total_run_time, 8); @@ -2720,7 +2941,7 @@ struct total_run_stats { u64 total_run_time; }; -static int __show_thread_runtime(struct thread *t, void *priv) +static int show_thread_runtime(struct thread *t, void *priv) { struct total_run_stats *stats = priv; struct thread_runtime *r; @@ -2743,22 +2964,6 @@ static int __show_thread_runtime(struct thread *t, void *priv) return 0; } -static int show_thread_runtime(struct thread *t, void *priv) -{ - if (t->dead) - return 0; - - return __show_thread_runtime(t, priv); -} - -static int show_deadthread_runtime(struct thread *t, void *priv) -{ - if (!t->dead) - return 0; - - return __show_thread_runtime(t, priv); -} - static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node) { const char *sep = " <- "; @@ -2850,11 +3055,6 @@ static void timehist_print_summary(struct perf_sched *sched, if (!task_count) printf("<no still running tasks>\n"); - printf("\nTerminated tasks:\n"); - machine__for_each_thread(m, show_deadthread_runtime, &totals); - if (task_count == totals.task_count) - printf("<no terminated tasks>\n"); - /* CPU idle stats not tracked when samples were skipped */ if (sched->skipped_samples && !sched->idle_hist) return; @@ -2918,16 +3118,16 @@ static void timehist_print_summary(struct perf_sched *sched, printf(" Total scheduling time (msec): "); print_sched_time(hist_time, 2); - printf(" (x %d)\n", sched->max_cpu); + printf(" (x %d)\n", sched->max_cpu.cpu); } -typedef int (*sched_handler)(struct perf_tool *tool, +typedef int (*sched_handler)(const struct perf_tool *tool, union perf_event *event, struct evsel *evsel, struct perf_sample *sample, struct machine *machine); -static int perf_timehist__process_sample(struct perf_tool *tool, +static int perf_timehist__process_sample(const struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct evsel *evsel, @@ -2935,9 +3135,11 @@ static int perf_timehist__process_sample(struct perf_tool *tool, { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); int err = 0; - int this_cpu = sample->cpu; + struct perf_cpu this_cpu = { + .cpu = sample->cpu, + }; - if (this_cpu > sched->max_cpu) + if (this_cpu.cpu > sched->max_cpu.cpu) sched->max_cpu = this_cpu; if (evsel->handler != NULL) { @@ -2962,8 +3164,11 @@ static int timehist_check_attr(struct perf_sched *sched, return -1; } - if (sched->show_callchain && !evsel__has_callchain(evsel)) { - pr_info("Samples do not have callchains.\n"); + /* only need to save callchain related to sched_switch event */ + if (sched->show_callchain && + evsel__name_is(evsel, "sched:sched_switch") && + !evsel__has_callchain(evsel)) { + pr_info("Samples of sched_switch event do not have callchains.\n"); sched->show_callchain = 0; symbol_conf.use_callchain = 0; } @@ -2972,6 +3177,47 @@ static int timehist_check_attr(struct perf_sched *sched, return 0; } +static int timehist_parse_prio_str(struct perf_sched *sched) +{ + char *p; + unsigned long start_prio, end_prio; + const char *str = sched->prio_str; + + if (!str) + return 0; + + while (isdigit(*str)) { + p = NULL; + start_prio = strtoul(str, &p, 0); + if (start_prio >= MAX_PRIO || (*p != '\0' && *p != ',' && *p != '-')) + return -1; + + if (*p == '-') { + str = ++p; + p = NULL; + end_prio = strtoul(str, &p, 0); + + if (end_prio >= MAX_PRIO || (*p != '\0' && *p != ',')) + return -1; + + if (end_prio < start_prio) 
+ return -1; + } else { + end_prio = start_prio; + } + + for (; start_prio <= end_prio; start_prio++) + __set_bit(start_prio, sched->prio_bitmap); + + if (*p) + ++p; + + str = p; + } + + return 0; +} + static int perf_sched__timehist(struct perf_sched *sched) { struct evsel_str_handler handlers[] = { @@ -3006,12 +3252,11 @@ static int perf_sched__timehist(struct perf_sched *sched) sched->tool.tracing_data = perf_event__process_tracing_data; sched->tool.build_id = perf_event__process_build_id; - sched->tool.ordered_events = true; sched->tool.ordering_requires_timestamps = true; symbol_conf.use_callchain = sched->show_callchain; - session = perf_session__new(&data, false, &sched->tool); + session = perf_session__new(&data, &sched->tool); if (IS_ERR(session)) return PTR_ERR(session); @@ -3027,17 +3272,22 @@ static int perf_sched__timehist(struct perf_sched *sched) if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { pr_err("Invalid time string\n"); - return -EINVAL; + err = -EINVAL; + goto out; } if (timehist_check_attr(sched, evlist) != 0) goto out; + if (timehist_parse_prio_str(sched) != 0) { + pr_err("Invalid prio string\n"); + goto out; + } + setup_pager(); /* prefer sched_waking if it is captured */ - if (perf_evlist__find_tracepoint_by_name(session->evlist, - "sched:sched_waking")) + if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking")) handlers[1].handler = timehist_sched_wakeup_ignore; /* setup per-evsel handlers */ @@ -3045,21 +3295,20 @@ static int perf_sched__timehist(struct perf_sched *sched) goto out; /* sched_switch event at a minimum needs to exist */ - if (!perf_evlist__find_tracepoint_by_name(session->evlist, - "sched:sched_switch")) { + if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) { pr_err("No sched_switch events found. 
Have you run 'perf sched record'?\n"); goto out; } - if (sched->show_migrations && - perf_session__set_tracepoints_handlers(session, migrate_handlers)) + if ((sched->show_migrations || sched->pre_migrations) && + perf_session__set_tracepoints_handlers(session, migrate_handlers)) goto out; /* pre-allocate struct for per-CPU idle stats */ - sched->max_cpu = session->header.env.nr_cpus_online; - if (sched->max_cpu == 0) - sched->max_cpu = 4; - if (init_idle_threads(sched->max_cpu)) + sched->max_cpu.cpu = session->header.env.nr_cpus_online; + if (sched->max_cpu.cpu == 0) + sched->max_cpu.cpu = 4; + if (init_idle_threads(sched->max_cpu.cpu)) goto out; /* summary_only implies summary option, but don't overwrite summary if set */ @@ -3168,20 +3417,50 @@ static void perf_sched__merge_lat(struct perf_sched *sched) } } +static int setup_cpus_switch_event(struct perf_sched *sched) +{ + unsigned int i; + + sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched))); + if (!sched->cpu_last_switched) + return -1; + + sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid))); + if (!sched->curr_pid) { + zfree(&sched->cpu_last_switched); + return -1; + } + + for (i = 0; i < MAX_CPUS; i++) + sched->curr_pid[i] = -1; + + return 0; +} + +static void free_cpus_switch_event(struct perf_sched *sched) +{ + zfree(&sched->curr_pid); + zfree(&sched->cpu_last_switched); +} + static int perf_sched__lat(struct perf_sched *sched) { + int rc = -1; struct rb_node *next; setup_pager(); + if (setup_cpus_switch_event(sched)) + return rc; + if (perf_sched__read_events(sched)) - return -1; + goto out_free_cpus_switch_event; perf_sched__merge_lat(sched); perf_sched__sort_lat(sched); printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n"); - printf(" Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n"); + printf(" Task | Runtime ms | Count | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n"); printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n"); next = rb_first_cached(&sched->sorted_atom_root); @@ -3204,31 +3483,32 @@ static int perf_sched__lat(struct perf_sched *sched) print_bad_events(sched); printf("\n"); - return 0; + rc = 0; + +out_free_cpus_switch_event: + free_cpus_switch_event(sched); + return rc; } static int setup_map_cpus(struct perf_sched *sched) { - struct perf_cpu_map *map; - - sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); + sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF); if (sched->map.comp) { - sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int)); + sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int)); if (!sched->map.comp_cpus) return -1; } - if (!sched->map.cpus_str) - return 0; - - map = perf_cpu_map__new(sched->map.cpus_str); - if (!map) { - pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); - return -1; + if (sched->map.cpus_str) { + sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str); + if (!sched->map.cpus) { + pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); + zfree(&sched->map.comp_cpus); + return -1; + } } - sched->map.cpus = map; return 0; } @@ -3268,33 +3548,73 @@ static int setup_color_cpus(struct perf_sched *sched) static int perf_sched__map(struct perf_sched *sched) { + int rc = -1; + + sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread))); 
+ if (!sched->curr_thread) + return rc; + + sched->curr_out_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_out_thread))); + if (!sched->curr_out_thread) + return rc; + + if (setup_cpus_switch_event(sched)) + goto out_free_curr_thread; + if (setup_map_cpus(sched)) - return -1; + goto out_free_cpus_switch_event; if (setup_color_pids(sched)) - return -1; + goto out_put_map_cpus; if (setup_color_cpus(sched)) - return -1; + goto out_put_color_pids; setup_pager(); if (perf_sched__read_events(sched)) - return -1; + goto out_put_color_cpus; + + rc = 0; print_bad_events(sched); - return 0; + +out_put_color_cpus: + perf_cpu_map__put(sched->map.color_cpus); + +out_put_color_pids: + perf_thread_map__put(sched->map.color_pids); + +out_put_map_cpus: + zfree(&sched->map.comp_cpus); + perf_cpu_map__put(sched->map.cpus); + +out_free_cpus_switch_event: + free_cpus_switch_event(sched); + +out_free_curr_thread: + zfree(&sched->curr_thread); + return rc; } static int perf_sched__replay(struct perf_sched *sched) { + int ret; unsigned long i; + mutex_init(&sched->start_work_mutex); + mutex_init(&sched->work_done_wait_mutex); + + ret = setup_cpus_switch_event(sched); + if (ret) + goto out_mutex_destroy; + calibrate_run_measurement_overhead(sched); calibrate_sleep_measurement_overhead(sched); test_calibrations(sched); - if (perf_sched__read_events(sched)) - return -1; + ret = perf_sched__read_events(sched); + if (ret) + goto out_free_cpus_switch_event; printf("nr_run_events: %ld\n", sched->nr_run_events); printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); @@ -3311,12 +3631,25 @@ static int perf_sched__replay(struct perf_sched *sched) print_task_traces(sched); add_cross_task_wakeups(sched); + sched->thread_funcs_exit = false; create_tasks(sched); printf("------------------------------------------------------------\n"); + if (sched->replay_repeat == 0) + sched->replay_repeat = UINT_MAX; + for (i = 0; i < sched->replay_repeat; i++) run_one_test(sched); - return 0; + sched->thread_funcs_exit = true; + destroy_tasks(sched); + +out_free_cpus_switch_event: + free_cpus_switch_event(sched); + +out_mutex_destroy: + mutex_destroy(&sched->start_work_mutex); + mutex_destroy(&sched->work_done_wait_mutex); + return ret; } static void setup_sorting(struct perf_sched *sched, const struct option *options, @@ -3337,10 +3670,21 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options sort_dimension__add("pid", &sched->cmp_pid); } +static bool schedstat_events_exposed(void) +{ + /* + * Select "sched:sched_stat_wait" event to check + * whether schedstat tracepoints are exposed. + */ + return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ? + false : true; +} + static int __cmd_record(int argc, const char **argv) { unsigned int rec_argc, i, j; - const char **rec_argv; + char **rec_argv; + const char **rec_argv_copy; const char * const record_args[] = { "record", "-a", @@ -3348,60 +3692,78 @@ static int __cmd_record(int argc, const char **argv) "-m", "1024", "-c", "1", "-e", "sched:sched_switch", - "-e", "sched:sched_stat_wait", - "-e", "sched:sched_stat_sleep", - "-e", "sched:sched_stat_iowait", "-e", "sched:sched_stat_runtime", "-e", "sched:sched_process_fork", "-e", "sched:sched_wakeup_new", "-e", "sched:sched_migrate_task", }; + + /* + * The tracepoints trace_sched_stat_{wait, sleep, iowait} + * are not exposed to user if CONFIG_SCHEDSTATS is not set, + * to prevent "perf sched record" execution failure, determine + * whether to record schedstat events according to actual situation. 
+ */ + const char * const schedstat_args[] = { + "-e", "sched:sched_stat_wait", + "-e", "sched:sched_stat_sleep", + "-e", "sched:sched_stat_iowait", + }; + unsigned int schedstat_argc = schedstat_events_exposed() ? + ARRAY_SIZE(schedstat_args) : 0; + struct tep_event *waking_event; + int ret; /* * +2 for either "-e", "sched:sched_wakeup" or * "-e", "sched:sched_waking" */ - rec_argc = ARRAY_SIZE(record_args) + 2 + argc - 1; + rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1; rec_argv = calloc(rec_argc + 1, sizeof(char *)); - if (rec_argv == NULL) return -ENOMEM; + rec_argv_copy = calloc(rec_argc + 1, sizeof(char *)); + if (rec_argv_copy == NULL) { + free(rec_argv); + return -ENOMEM; + } for (i = 0; i < ARRAY_SIZE(record_args); i++) rec_argv[i] = strdup(record_args[i]); - rec_argv[i++] = "-e"; + rec_argv[i++] = strdup("-e"); waking_event = trace_event__tp_format("sched", "sched_waking"); if (!IS_ERR(waking_event)) rec_argv[i++] = strdup("sched:sched_waking"); else rec_argv[i++] = strdup("sched:sched_wakeup"); + for (j = 0; j < schedstat_argc; j++) + rec_argv[i++] = strdup(schedstat_args[j]); + for (j = 1; j < (unsigned int)argc; j++, i++) - rec_argv[i] = argv[j]; + rec_argv[i] = strdup(argv[j]); BUG_ON(i != rec_argc); - return cmd_record(i, rec_argv); + memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc); + ret = cmd_record(rec_argc, rec_argv_copy); + + for (i = 0; i < rec_argc; i++) + free(rec_argv[i]); + free(rec_argv); + free(rec_argv_copy); + + return ret; } int cmd_sched(int argc, const char **argv) { static const char default_sort_order[] = "avg, max, switch, runtime"; struct perf_sched sched = { - .tool = { - .sample = perf_sched__process_tracepoint_sample, - .comm = perf_sched__process_comm, - .namespaces = perf_event__process_namespaces, - .lost = perf_event__process_lost, - .fork = perf_sched__process_fork_event, - .ordered_events = true, - }, .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), .sort_list = LIST_HEAD_INIT(sched.sort_list), - .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, - .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, .sort_order = default_sort_order, .replay_repeat = 10, .profile_cpu = -1, @@ -3432,7 +3794,7 @@ int cmd_sched(int argc, const char **argv) }; const struct option replay_options[] = { OPT_UINTEGER('r', "repeat", &sched.replay_repeat, - "repeat the workload replay N times (-1: infinite)"), + "repeat the workload replay N times (0: infinite)"), OPT_PARENT(sched_options) }; const struct option map_options[] = { @@ -3444,6 +3806,10 @@ int cmd_sched(int argc, const char **argv) "highlight given CPUs in map"), OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus", "display given CPUs in map"), + OPT_STRING(0, "task-name", &sched.map.task_name, "task", + "map output only for the given task name(s)."), + OPT_BOOLEAN(0, "fuzzy-name", &sched.map.fuzzy, + "given command name can be partially matched (fuzzy matching)"), OPT_PARENT(sched_options) }; const struct option timehist_options[] = { @@ -3474,6 +3840,10 @@ int cmd_sched(int argc, const char **argv) OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]", "analyze events only for given thread id(s)"), OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), + OPT_BOOLEAN(0, "show-prio", &sched.show_prio, "Show task priority"), + OPT_STRING(0, "prio", &sched.prio_str, "prio", + "analyze events only for given task priority(ies)"), + OPT_BOOLEAN('P', "pre-migrations", &sched.pre_migrations, "Show pre-migration wait time"), OPT_PARENT(sched_options) }; @@ -3514,10 +3884,14 
@@ int cmd_sched(int argc, const char **argv) .switch_event = replay_switch_event, .fork_event = replay_fork_event, }; - unsigned int i; + int ret; - for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++) - sched.curr_pid[i] = -1; + perf_tool__init(&sched.tool, /*ordered_events=*/true); + sched.tool.sample = perf_sched__process_tracepoint_sample; + sched.tool.comm = perf_sched__process_comm; + sched.tool.namespaces = perf_event__process_namespaces; + sched.tool.lost = perf_event__process_lost; + sched.tool.fork = perf_sched__process_fork_event; argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands, sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); @@ -3527,12 +3901,11 @@ int cmd_sched(int argc, const char **argv) /* * Aliased to 'perf script' for now: */ - if (!strcmp(argv[0], "script")) + if (!strcmp(argv[0], "script")) { return cmd_script(argc, argv); - - if (!strncmp(argv[0], "rec", 3)) { + } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) { return __cmd_record(argc, argv); - } else if (!strncmp(argv[0], "lat", 3)) { + } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) { sched.tp_handler = &lat_ops; if (argc > 1) { argc = parse_options(argc, argv, latency_options, latency_usage, 0); @@ -3546,11 +3919,19 @@ int cmd_sched(int argc, const char **argv) argc = parse_options(argc, argv, map_options, map_usage, 0); if (argc) usage_with_options(map_usage, map_options); + + if (sched.map.task_name) { + sched.map.task_names = strlist__new(sched.map.task_name, NULL); + if (sched.map.task_names == NULL) { + fprintf(stderr, "Failed to parse task names\n"); + return -1; + } + } } sched.tp_handler = &map_ops; setup_sorting(&sched, latency_options, latency_usage); return perf_sched__map(&sched); - } else if (!strncmp(argv[0], "rep", 3)) { + } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) { sched.tp_handler = &replay_ops; if (argc) { argc = parse_options(argc, argv, replay_options, replay_usage, 0); @@ -3575,11 +3956,17 @@ int cmd_sched(int argc, const char **argv) parse_options_usage(NULL, timehist_options, "n", true); return -EINVAL; } + ret = symbol__validate_sym_arguments(); + if (ret) + return ret; return perf_sched__timehist(&sched); } else { usage_with_options(sched_usage, sched_options); } + /* free usage string allocated by parse_options_subcommand */ + free((void *)sched_usage[0]); + return 0; } |
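
The replay hunks above convert the raw pthread_mutex_t fields to perf's annotated struct mutex wrappers from util/mutex.h, add destroy_tasks(), and introduce the thread_funcs_exit flag so worker threads leave their loop before the mutexes are destroyed. The synchronization idiom itself is unchanged: the parent keeps start_work_mutex locked, and each worker "waits" at the gate by locking and immediately unlocking it. Below is a minimal standalone pthread sketch of that gate; the structure and names are illustrative, not perf's API.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NR_WORKERS 4

static pthread_mutex_t start_gate = PTHREAD_MUTEX_INITIALIZER;
static sem_t ready_sem;

static void *worker(void *arg)
{
	long id = (long)arg;

	sem_post(&ready_sem);            /* tell the parent we are ready */
	pthread_mutex_lock(&start_gate); /* blocks until the parent opens the gate */
	pthread_mutex_unlock(&start_gate);
	printf("worker %ld running\n", id);
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];

	sem_init(&ready_sem, 0, 0);
	pthread_mutex_lock(&start_gate); /* close the gate before spawning */

	for (long i = 0; i < NR_WORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);

	for (int i = 0; i < NR_WORKERS; i++)
		sem_wait(&ready_sem);    /* all workers parked at the gate */

	pthread_mutex_unlock(&start_gate); /* release everyone at once */

	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Compile with -pthread; every worker parks on the mutex until the parent releases it, which is how run_one_test() starts each replay iteration for all tasks at once.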
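
timehist's new --prio filter parses lists and ranges such as "9,120-139" into the MAX_PRIO-wide prio_bitmap via timehist_parse_prio_str(). A self-contained sketch of the same parsing loop follows; it uses a plain byte array in place of the kernel's DECLARE_BITMAP/__set_bit helpers, which is the only liberty taken.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PRIO 140

static unsigned char prio_bitmap[MAX_PRIO / 8 + 1];

static void set_prio_bit(unsigned long prio)
{
	prio_bitmap[prio / 8] |= 1u << (prio % 8);
}

/* Accepts "N", "N-M", and comma-separated combinations thereof. */
static int parse_prio_str(const char *str)
{
	char *p;
	unsigned long start_prio, end_prio;

	while (isdigit((unsigned char)*str)) {
		start_prio = strtoul(str, &p, 0);
		if (start_prio >= MAX_PRIO || (*p != '\0' && *p != ',' && *p != '-'))
			return -1;

		if (*p == '-') {
			str = ++p;
			end_prio = strtoul(str, &p, 0);
			if (end_prio >= MAX_PRIO || (*p != '\0' && *p != ','))
				return -1;
			if (end_prio < start_prio)
				return -1;
		} else {
			end_prio = start_prio;
		}

		for (; start_prio <= end_prio; start_prio++)
			set_prio_bit(start_prio);

		if (*p)
			++p;
		str = p;
	}
	return 0;
}

int main(void)
{
	if (parse_prio_str("0,100-102") == 0)
		for (int i = 0; i < MAX_PRIO; i++)
			if (prio_bitmap[i / 8] & (1u << (i % 8)))
				printf("prio %d selected\n", i);
	return 0;
}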
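
perf sched map now takes --task-name (optionally with --fuzzy-name), and sched_match_task() decides per comm string whether a task gets a shortname or the "-" placeholder: substring match via strstr() in fuzzy mode, exact strcmp() otherwise. A reduced sketch, with a plain string array standing in for perf's strlist:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool match_task(const char *comm, const char *const *names,
		       int nr_names, bool fuzzy)
{
	for (int i = 0; i < nr_names; i++) {
		/* fuzzy: substring anywhere in comm; exact: whole-string compare */
		bool found = fuzzy ? strstr(comm, names[i]) != NULL
				   : strcmp(comm, names[i]) == 0;
		if (found)
			return true;
	}
	return false;
}

int main(void)
{
	const char *names[] = { "kworker", "perf" };

	printf("%d\n", match_task("kworker/0:1", names, 2, true));  /* 1: fuzzy hit */
	printf("%d\n", match_task("kworker/0:1", names, 2, false)); /* 0: no exact match */
	return 0;
}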
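
The reworked timeline comment in timehist defines dt_pre_mig as the span from wakeup to migration, accounted only when the migration timestamp falls between ready_to_run and the sched-in time tprev. A small worked sketch of that interval arithmetic; the struct below is an illustrative stand-in for the relevant thread_runtime fields, not the real layout.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

struct intervals {
	u64 dt_run;     /* tprev..t: time on cpu */
	u64 dt_delay;   /* ready..tprev: wakeup to sched-in */
	u64 dt_pre_mig; /* ready..migrated: wakeup to migration */
};

static struct intervals compute(u64 ready, u64 migrated, u64 tprev, u64 t)
{
	struct intervals iv = { .dt_run = t - tprev };

	if (ready && ready <= tprev)
		iv.dt_delay = tprev - ready;
	/* migration counts only if it happened after wakeup, before sched-in */
	if (migrated > ready && migrated < tprev)
		iv.dt_pre_mig = migrated - ready;
	return iv;
}

int main(void)
{
	/* wakeup at 100, migrated at 140, scheduled in at 200, out at 260 */
	struct intervals iv = compute(100, 140, 200, 260);

	printf("run=%llu delay=%llu pre_mig=%llu\n",
	       (unsigned long long)iv.dt_run,
	       (unsigned long long)iv.dt_delay,
	       (unsigned long long)iv.dt_pre_mig); /* run=60 delay=100 pre_mig=40 */
	return 0;
}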
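
Finally, __cmd_record() now probes for the schedstat tracepoints (absent when the kernel lacks CONFIG_SCHEDSTATS) before adding them to the record command line, and strdup()s every argument so the whole vector can be freed after cmd_record() returns. A sketch of that probe-then-extend-argv pattern; the tracefs access() probe here is an assumption for illustration, since perf itself checks via trace_event__tp_format().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Assumed probe: check whether the tracepoint format file is exposed. */
static int schedstat_exposed(void)
{
	return access("/sys/kernel/tracing/events/sched/sched_stat_wait/format", F_OK) == 0 ||
	       access("/sys/kernel/debug/tracing/events/sched/sched_stat_wait/format", F_OK) == 0;
}

int main(void)
{
	const char *base[] = { "record", "-a", "-e", "sched:sched_switch" };
	const char *schedstat[] = { "-e", "sched:sched_stat_wait" };
	unsigned int nr_sched = schedstat_exposed() ? 2 : 0;
	unsigned int argc = 4 + nr_sched, i = 0, j;
	char **argv = calloc(argc + 1, sizeof(*argv)); /* NULL-terminated */

	if (!argv)
		return 1;
	for (j = 0; j < 4; j++)
		argv[i++] = strdup(base[j]);
	for (j = 0; j < nr_sched; j++)          /* only if the kernel exposes them */
		argv[i++] = strdup(schedstat[j]);

	for (j = 0; j < argc; j++)
		printf("%s\n", argv[j]);

	for (j = 0; j < argc; j++)              /* safe to free: everything was strdup'd */
		free(argv[j]);
	free(argv);
	return 0;
}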