Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r-- | tools/perf/builtin-sched.c | 220
1 files changed, 114 insertions, 106 deletions
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index dd6065afbbaf..b248c433529a 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -92,24 +92,6 @@ struct sched_atom {
 	struct task_desc	*wakee;
 };
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
-
-/* task state bitmask, copied from include/linux/sched.h */
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-#define __TASK_STOPPED 4
-#define __TASK_TRACED 8
-/* in tsk->exit_state */
-#define EXIT_DEAD 16
-#define EXIT_ZOMBIE 32
-#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
-/* in tsk->state again */
-#define TASK_DEAD 64
-#define TASK_WAKEKILL 128
-#define TASK_WAKING 256
-#define TASK_PARKED 512
-
 enum thread_state {
 	THREAD_SLEEPING = 0,
 	THREAD_WAIT_CPU,
@@ -266,7 +248,7 @@ struct thread_runtime {
 	u64 total_preempt_time;
 	u64 total_delay_time;
 
-	int last_state;
+	char last_state;
 
 	char shortname[3];
 	bool comm_changed;
@@ -436,7 +418,7 @@ static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *t
 }
 
 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
-				  u64 timestamp, u64 task_state __maybe_unused)
+				  u64 timestamp, const char task_state __maybe_unused)
 {
 	struct sched_atom *event = get_new_event(task, timestamp);
 
@@ -860,7 +842,7 @@ static int replay_switch_event(struct perf_sched *sched,
 		   *next_comm = evsel__strval(evsel, sample, "next_comm");
 	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
 		  next_pid = evsel__intval(evsel, sample, "next_pid");
-	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
 	struct task_desc *prev, __maybe_unused *next;
 	u64 timestamp0, timestamp = sample->time;
 	int cpu = sample->cpu;
@@ -1050,13 +1032,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
 	return 0;
 }
 
-static char sched_out_state(u64 prev_state)
-{
-	const char *str = TASK_STATE_TO_CHAR_STR;
-
-	return str[prev_state];
-}
-
 static int
 add_sched_out_event(struct work_atoms *atoms,
 		    char run_state,
@@ -1132,7 +1107,7 @@ static int latency_switch_event(struct perf_sched *sched,
 {
 	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
 		  next_pid = evsel__intval(evsel, sample, "next_pid");
-	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+	const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
 	struct work_atoms *out_events, *in_events;
 	struct thread *sched_out, *sched_in;
 	u64 timestamp0, timestamp = sample->time;
@@ -1168,7 +1143,7 @@ static int latency_switch_event(struct perf_sched *sched,
 			goto out_put;
 		}
 	}
-	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
+	if (add_sched_out_event(out_events, prev_state, timestamp))
 		return -1;
 
 	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
@@ -2033,24 +2008,12 @@ static void timehist_header(struct perf_sched *sched)
 	printf("\n");
 }
 
-static char task_state_char(struct thread *thread, int state)
-{
-	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
-	unsigned bit = state ? ffs(state) : 0;
-
-	/* 'I' for idle */
-	if (thread__tid(thread) == 0)
-		return 'I';
-
-	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
-}
-
 static void timehist_print_sample(struct perf_sched *sched,
 				  struct evsel *evsel,
 				  struct perf_sample *sample,
 				  struct addr_location *al,
 				  struct thread *thread,
-				  u64 t, int state)
+				  u64 t, const char state)
 {
 	struct thread_runtime *tr = thread__priv(thread);
 	const char *next_comm = evsel__strval(evsel, sample, "next_comm");
@@ -2091,7 +2054,7 @@ static void timehist_print_sample(struct perf_sched *sched,
 	print_sched_time(tr->dt_run, 6);
 
 	if (sched->show_state)
-		printf(" %5c ", task_state_char(thread, state));
+		printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
 
 	if (sched->show_next) {
 		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
@@ -2163,9 +2126,9 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
 	else if (r->last_time) {
 		u64 dt_wait = tprev - r->last_time;
 
-		if (r->last_state == TASK_RUNNING)
+		if (r->last_state == 'R')
 			r->dt_preempt = dt_wait;
-		else if (r->last_state == TASK_UNINTERRUPTIBLE)
+		else if (r->last_state == 'D')
 			r->dt_iowait = dt_wait;
 		else
 			r->dt_sleep = dt_wait;
@@ -2590,7 +2553,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
 	struct thread_runtime *tr = NULL;
 	u64 tprev, t = sample->time;
 	int rc = 0;
-	int state = evsel__intval(evsel, sample, "prev_state");
+	const char state = evsel__taskstate(evsel, sample, "prev_state");
 
 	addr_location__init(&al);
 	if (machine__resolve(machine, &al, sample) < 0) {
@@ -3204,14 +3167,44 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
 	}
 }
 
+static int setup_cpus_switch_event(struct perf_sched *sched)
+{
+	unsigned int i;
+
+	sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
+	if (!sched->cpu_last_switched)
+		return -1;
+
+	sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
+	if (!sched->curr_pid) {
+		zfree(&sched->cpu_last_switched);
+		return -1;
+	}
+
+	for (i = 0; i < MAX_CPUS; i++)
+		sched->curr_pid[i] = -1;
+
+	return 0;
+}
+
+static void free_cpus_switch_event(struct perf_sched *sched)
+{
+	zfree(&sched->curr_pid);
+	zfree(&sched->cpu_last_switched);
+}
+
 static int perf_sched__lat(struct perf_sched *sched)
 {
+	int rc = -1;
 	struct rb_node *next;
 
 	setup_pager();
 
+	if (setup_cpus_switch_event(sched))
+		return rc;
+
 	if (perf_sched__read_events(sched))
-		return -1;
+		goto out_free_cpus_switch_event;
 
 	perf_sched__merge_lat(sched);
 	perf_sched__sort_lat(sched);
@@ -3240,13 +3233,15 @@ static int perf_sched__lat(struct perf_sched *sched)
 	print_bad_events(sched);
 	printf("\n");
 
-	return 0;
+	rc = 0;
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+	return rc;
 }
 
 static int setup_map_cpus(struct perf_sched *sched)
 {
-	struct perf_cpu_map *map;
-
 	sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
 
 	if (sched->map.comp) {
@@ -3255,16 +3250,15 @@ static int setup_map_cpus(struct perf_sched *sched)
 		return -1;
 	}
 
-	if (!sched->map.cpus_str)
-		return 0;
-
-	map = perf_cpu_map__new(sched->map.cpus_str);
-	if (!map) {
-		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
-		return -1;
+	if (sched->map.cpus_str) {
+		sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
+		if (!sched->map.cpus) {
+			pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
+			zfree(&sched->map.comp_cpus);
+			return -1;
+		}
 	}
 
-	sched->map.cpus = map;
 	return 0;
 }
 
@@ -3304,33 +3298,69 @@ static int setup_color_cpus(struct perf_sched *sched)
 
 static int perf_sched__map(struct perf_sched *sched)
 {
+	int rc = -1;
+
+	sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
+	if (!sched->curr_thread)
+		return rc;
+
+	if (setup_cpus_switch_event(sched))
+		goto out_free_curr_thread;
+
 	if (setup_map_cpus(sched))
-		return -1;
+		goto out_free_cpus_switch_event;
 
 	if (setup_color_pids(sched))
-		return -1;
+		goto out_put_map_cpus;
 
 	if (setup_color_cpus(sched))
-		return -1;
+		goto out_put_color_pids;
 
 	setup_pager();
 
 	if (perf_sched__read_events(sched))
-		return -1;
+		goto out_put_color_cpus;
+
+	rc = 0;
 
 	print_bad_events(sched);
-	return 0;
+
+out_put_color_cpus:
+	perf_cpu_map__put(sched->map.color_cpus);
+
+out_put_color_pids:
+	perf_thread_map__put(sched->map.color_pids);
+
+out_put_map_cpus:
+	zfree(&sched->map.comp_cpus);
+	perf_cpu_map__put(sched->map.cpus);
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
+out_free_curr_thread:
+	zfree(&sched->curr_thread);
+	return rc;
 }
 
 static int perf_sched__replay(struct perf_sched *sched)
 {
+	int ret;
 	unsigned long i;
 
+	mutex_init(&sched->start_work_mutex);
+	mutex_init(&sched->work_done_wait_mutex);
+
+	ret = setup_cpus_switch_event(sched);
+	if (ret)
+		goto out_mutex_destroy;
+
 	calibrate_run_measurement_overhead(sched);
 	calibrate_sleep_measurement_overhead(sched);
 	test_calibrations(sched);
 
-	if (perf_sched__read_events(sched))
-		return -1;
+	ret = perf_sched__read_events(sched);
+	if (ret)
+		goto out_free_cpus_switch_event;
 
 	printf("nr_run_events: %ld\n", sched->nr_run_events);
 	printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
@@ -3355,7 +3385,14 @@ static int perf_sched__replay(struct perf_sched *sched)
 
 	sched->thread_funcs_exit = true;
 	destroy_tasks(sched);
-	return 0;
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
+out_mutex_destroy:
+	mutex_destroy(&sched->start_work_mutex);
+	mutex_destroy(&sched->work_done_wait_mutex);
+	return ret;
 }
 
 static void setup_sorting(struct perf_sched *sched, const struct option *options,
@@ -3590,28 +3627,7 @@ int cmd_sched(int argc, const char **argv)
 		.switch_event	    = replay_switch_event,
 		.fork_event	    = replay_fork_event,
 	};
-	unsigned int i;
-	int ret = 0;
-
-	mutex_init(&sched.start_work_mutex);
-	mutex_init(&sched.work_done_wait_mutex);
-	sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread));
-	if (!sched.curr_thread) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
-	if (!sched.cpu_last_switched) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
-	if (!sched.curr_pid) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	for (i = 0; i < MAX_CPUS; i++)
-		sched.curr_pid[i] = -1;
+	int ret;
 
 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
@@ -3622,9 +3638,9 @@ int cmd_sched(int argc, const char **argv)
 	 * Aliased to 'perf script' for now:
 	 */
 	if (!strcmp(argv[0], "script")) {
-		ret = cmd_script(argc, argv);
+		return cmd_script(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
-		ret = __cmd_record(argc, argv);
+		return __cmd_record(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
 		sched.tp_handler = &lat_ops;
 		if (argc > 1) {
@@ -3633,7 +3649,7 @@ int cmd_sched(int argc, const char **argv)
 			usage_with_options(latency_usage, latency_options);
 		}
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__lat(&sched);
+		return perf_sched__lat(&sched);
 	} else if (!strcmp(argv[0], "map")) {
 		if (argc) {
 			argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3642,7 +3658,7 @@ int cmd_sched(int argc, const char **argv)
 		}
 		sched.tp_handler = &map_ops;
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__map(&sched);
+		return perf_sched__map(&sched);
 	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
 		sched.tp_handler = &replay_ops;
 		if (argc) {
@@ -3650,7 +3666,7 @@ int cmd_sched(int argc, const char **argv)
 			if (argc)
 				usage_with_options(replay_usage, replay_options);
 		}
-		ret = perf_sched__replay(&sched);
+		return perf_sched__replay(&sched);
 	} else if (!strcmp(argv[0], "timehist")) {
 		if (argc) {
 			argc = parse_options(argc, argv, timehist_options,
@@ -3666,24 +3682,16 @@ int cmd_sched(int argc, const char **argv)
 				parse_options_usage(NULL, timehist_options, "w", true);
 			if (sched.show_next)
 				parse_options_usage(NULL, timehist_options, "n", true);
-			ret = -EINVAL;
-			goto out;
+			return -EINVAL;
 		}
 		ret = symbol__validate_sym_arguments();
 		if (ret)
-			goto out;
+			return ret;
 
-		ret = perf_sched__timehist(&sched);
+		return perf_sched__timehist(&sched);
 	} else {
 		usage_with_options(sched_usage, sched_options);
 	}
 
-out:
-	free(sched.curr_pid);
-	free(sched.cpu_last_switched);
-	free(sched.curr_thread);
-	mutex_destroy(&sched.start_work_mutex);
-	mutex_destroy(&sched.work_done_wait_mutex);
-
-	return ret;
+	return 0;
 }