1 | #include "builtin.h" |
2 | #include "perf.h" |
3 | |
4 | #include "util/util.h" |
5 | #include "util/evlist.h" |
6 | #include "util/cache.h" |
7 | #include "util/evsel.h" |
8 | #include "util/symbol.h" |
9 | #include "util/thread.h" |
10 | #include "util/header.h" |
11 | #include "util/session.h" |
12 | #include "util/tool.h" |
13 | |
14 | #include "util/parse-options.h" |
15 | #include "util/trace-event.h" |
16 | |
17 | #include "util/debug.h" |
18 | |
19 | #include <sys/prctl.h> |
20 | #include <sys/resource.h> |
21 | |
22 | #include <semaphore.h> |
23 | #include <pthread.h> |
24 | #include <math.h> |
25 | |
26 | #define PR_SET_NAME 15 /* Set process name */ |
27 | #define MAX_CPUS 4096 |
28 | #define COMM_LEN 20 |
29 | #define SYM_LEN 129 |
30 | #define MAX_PID 65536 |
31 | |
32 | struct sched_atom; |
33 | |
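/*
 * Per-task replay state: the event list reconstructed from the trace,
 * plus the worker thread and the semaphores used to coordinate it.
 */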
struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

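/*
 * One scheduling interval for latency accounting: the sched-out, wakeup
 * and sched-in timestamps of a thread, plus the runtime it then got.
 * work_atoms aggregates all atoms of one thread in an rb-tree node.
 */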
struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc *pid_to_task[MAX_PID];
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	int		 profile_cpu;
	/*
	 * Track the current task - that way we can know whether there's any
	 * weird events, such as a task being switched away that is not current.
	 */
	int		 max_cpu;
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 start_time;
	u64		 cpu_usage;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 sum_runtime;
	u64		 sum_fluct;
	u64		 run_avg;
	u64		 all_runtime;
	u64		 all_count;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root	 atom_root, sorted_atom_root;
	struct list_head sort_list, cmp_pid;
};

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

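/*
 * Burn CPU for 'nsecs', compensating for the measured cost of the
 * timestamp loop itself (run_measurement_overhead).
 */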
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

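/*
 * Estimate the fixed cost of the measurement loop and of a short
 * nanosleep() by taking the minimum over ten trials; burn_nsecs()
 * subtracts the run overhead so injected runtimes track the trace.
 */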
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * to it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

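/*
 * Look up or allocate the task_desc for a recorded PID and add it to
 * the flat tasks[] array used by the replayer.
 */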
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

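/*
 * Execute one recorded atom during replay: burn CPU for RUN, block on
 * the wakeup semaphore for SLEEP, post it for WAKEUP.
 */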
static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

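/*
 * Open a software task-clock counter on the calling thread; reading the
 * fd yields the thread's accumulated CPU time in nanoseconds.
 */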
static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0)
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n", fd, strerror(errno));
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
};

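/*
 * Worker thread body: signal readiness, wait for the parent to release
 * start_work_mutex, replay all atoms while measuring own CPU usage,
 * report completion via work_done_sem, then loop for the next run.
 */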
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

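/*
 * Parent side of one replay iteration: wait until every worker is
 * ready, re-take work_done_wait_mutex, drop start_work_mutex to start
 * the run, then collect per-task completions and CPU-usage averages.
 */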
static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
					  sched->parent_cpu_usage) / 10;

	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * 9 + delta) / 10;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);

	printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / 1e6,
		(double)sched->runavg_parent_cpu_usage / 1e6);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, 1e6);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct perf_evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = perf_evsel__strval(evsel, sample, "comm");
	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		return 0;
	}

	if (verbose) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
		printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
	}

	register_pid(sched, parent->tid, thread__comm_str(parent));
	register_pid(sched, child->tid, thread__comm_str(child));
	return 0;
}

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

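/*
 * rb-tree lookup of a thread's work_atoms, using the same ordered
 * comparator list that was used on insertion (in practice cmp_pid,
 * i.e. compare by thread tid).
 */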
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
850 | pr_err("Non memory at %s", __func__); |
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}

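/*
 * Latency mode: on every context switch, open a new atom for the thread
 * scheduled out (an 'R' prev_state means it stays runnable, so its wait
 * starts now) and close the pending atom of the thread scheduled in,
 * accounting its wakeup-to-schedule latency.
 */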
static int latency_switch_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, 0, prev_pid);
	sched_in = machine__findnew_thread(machine, 0, next_pid);

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			return -1;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			return -1;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			return -1;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		/*
		 * Task came in that we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			return -1;
	}
	add_sched_in_event(in_events, timestamp);

	return 0;
}

static int latency_runtime_event(struct perf_sched *sched,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, 0, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			return -1;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	add_runtime_event(atoms, runtime, timestamp);
	return 0;
}

static int latency_wakeup_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	wakee = machine__findnew_thread(machine, 0, pid);
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			return -1;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * A wakeup event is not guaranteed to fire while the task is
	 * off the run queue: it may also happen while the task is
	 * already runnable and merely flips ->state to TASK_RUNNING.
	 * In that case don't set ->wake_up_time for the still-queued
	 * task.
	 *
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * skip in this case.
	 */
1044 | if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) |
1045 | return 0; |
1046 | |
1047 | sched->nr_timestamps++; |
1048 | if (atom->sched_out_time > timestamp) { |
1049 | sched->nr_unordered_timestamps++; |
1050 | return 0; |
1051 | } |
1052 | |
1053 | atom->state = THREAD_WAIT_CPU; |
1054 | atom->wake_up_time = timestamp; |
1055 | return 0; |
1056 | } |
1057 | |
1058 | static int latency_migrate_task_event(struct perf_sched *sched, |
1059 | struct perf_evsel *evsel, |
1060 | struct perf_sample *sample, |
1061 | struct machine *machine) |
1062 | { |
1063 | const u32 pid = perf_evsel__intval(evsel, sample, "pid"); |
1064 | u64 timestamp = sample->time; |
1065 | struct work_atoms *atoms; |
1066 | struct work_atom *atom; |
1067 | struct thread *migrant; |
1068 | |
1069 | /* |
1070 | * Only need to worry about migration when profiling one CPU. |
1071 | */ |
1072 | if (sched->profile_cpu == -1) |
1073 | return 0; |
1074 | |
1075 | migrant = machine__findnew_thread(machine, 0, pid); |
1076 | atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); |
1077 | if (!atoms) { |
1078 | if (thread_atoms_insert(sched, migrant)) |
1079 | return -1; |
1080 | register_pid(sched, migrant->tid, thread__comm_str(migrant)); |
1081 | atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); |
1082 | if (!atoms) { |
1083 | pr_err("migration-event: Internal tree error"); |
1084 | return -1; |
1085 | } |
1086 | if (add_sched_out_event(atoms, 'R', timestamp)) |
1087 | return -1; |
1088 | } |
1089 | |
1090 | BUG_ON(list_empty(&atoms->work_list)); |
1091 | |
1092 | atom = list_entry(atoms->work_list.prev, struct work_atom, list); |
1093 | atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; |
1094 | |
1095 | sched->nr_timestamps++; |
1096 | |
1097 | if (atom->sched_out_time > timestamp) |
1098 | sched->nr_unordered_timestamps++; |
1099 | |
1100 | return 0; |
1101 | } |
1102 | |
1103 | static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) |
1104 | { |
1105 | int i; |
1106 | int ret; |
1107 | u64 avg; |
1108 | |
1109 | if (!work_list->nb_atoms) |
1110 | return; |
1111 | /* |
1112 | * Ignore idle threads: |
1113 | */ |
1114 | if (!strcmp(thread__comm_str(work_list->thread), "swapper")) |
1115 | return; |
1116 | |
1117 | sched->all_runtime += work_list->total_runtime; |
1118 | sched->all_count += work_list->nb_atoms; |
1119 | |
1120 | ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid); |
1121 | |
1122 | for (i = 0; i < 24 - ret; i++) |
1123 | printf(" "); |
1124 | |
1125 | avg = work_list->total_lat / work_list->nb_atoms; |
1126 | |
1127 | printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n", |
1128 | (double)work_list->total_runtime / 1e6, |
1129 | work_list->nb_atoms, (double)avg / 1e6, |
1130 | (double)work_list->max_lat / 1e6, |
1131 | (double)work_list->max_lat_at / 1e9); |
1132 | } |
1133 | |
1134 | static int pid_cmp(struct work_atoms *l, struct work_atoms *r) |
1135 | { |
1136 | if (l->thread->tid < r->thread->tid) |
1137 | return -1; |
1138 | if (l->thread->tid > r->thread->tid) |
1139 | return 1; |
1140 | |
1141 | return 0; |
1142 | } |
1143 | |
1144 | static int avg_cmp(struct work_atoms *l, struct work_atoms *r) |
1145 | { |
1146 | u64 avgl, avgr; |
1147 | |
1148 | if (!l->nb_atoms) |
1149 | return -1; |
1150 | |
1151 | if (!r->nb_atoms) |
1152 | return 1; |
1153 | |
1154 | avgl = l->total_lat / l->nb_atoms; |
1155 | avgr = r->total_lat / r->nb_atoms; |
1156 | |
1157 | if (avgl < avgr) |
1158 | return -1; |
1159 | if (avgl > avgr) |
1160 | return 1; |
1161 | |
1162 | return 0; |
1163 | } |
1164 | |
1165 | static int max_cmp(struct work_atoms *l, struct work_atoms *r) |
1166 | { |
1167 | if (l->max_lat < r->max_lat) |
1168 | return -1; |
1169 | if (l->max_lat > r->max_lat) |
1170 | return 1; |
1171 | |
1172 | return 0; |
1173 | } |
1174 | |
1175 | static int switch_cmp(struct work_atoms *l, struct work_atoms *r) |
1176 | { |
1177 | if (l->nb_atoms < r->nb_atoms) |
1178 | return -1; |
1179 | if (l->nb_atoms > r->nb_atoms) |
1180 | return 1; |
1181 | |
1182 | return 0; |
1183 | } |
1184 | |
1185 | static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) |
1186 | { |
1187 | if (l->total_runtime < r->total_runtime) |
1188 | return -1; |
1189 | if (l->total_runtime > r->total_runtime) |
1190 | return 1; |
1191 | |
1192 | return 0; |
1193 | } |
1194 | |
1195 | static int sort_dimension__add(const char *tok, struct list_head *list) |
1196 | { |
1197 | size_t i; |
1198 | static struct sort_dimension avg_sort_dimension = { |
1199 | .name = "avg", |
1200 | .cmp = avg_cmp, |
1201 | }; |
1202 | static struct sort_dimension max_sort_dimension = { |
1203 | .name = "max", |
1204 | .cmp = max_cmp, |
1205 | }; |
1206 | static struct sort_dimension pid_sort_dimension = { |
1207 | .name = "pid", |
1208 | .cmp = pid_cmp, |
1209 | }; |
1210 | static struct sort_dimension runtime_sort_dimension = { |
1211 | .name = "runtime", |
1212 | .cmp = runtime_cmp, |
1213 | }; |
1214 | static struct sort_dimension switch_sort_dimension = { |
1215 | .name = "switch", |
1216 | .cmp = switch_cmp, |
1217 | }; |
1218 | struct sort_dimension *available_sorts[] = { |
1219 | &pid_sort_dimension, |
1220 | &avg_sort_dimension, |
1221 | &max_sort_dimension, |
1222 | &switch_sort_dimension, |
1223 | &runtime_sort_dimension, |
1224 | }; |
1225 | |
1226 | for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { |
1227 | if (!strcmp(available_sorts[i]->name, tok)) { |
1228 | list_add_tail(&available_sorts[i]->list, list); |
1229 | |
1230 | return 0; |
1231 | } |
1232 | } |
1233 | |
1234 | return -1; |
1235 | } |
1236 | |
1237 | static void perf_sched__sort_lat(struct perf_sched *sched) |
1238 | { |
1239 | struct rb_node *node; |
1240 | |
1241 | for (;;) { |
1242 | struct work_atoms *data; |
1243 | node = rb_first(&sched->atom_root); |
1244 | if (!node) |
1245 | break; |
1246 | |
1247 | rb_erase(node, &sched->atom_root); |
1248 | data = rb_entry(node, struct work_atoms, node); |
1249 | __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); |
1250 | } |
1251 | } |
1252 | |
1253 | static int process_sched_wakeup_event(struct perf_tool *tool, |
1254 | struct perf_evsel *evsel, |
1255 | struct perf_sample *sample, |
1256 | struct machine *machine) |
1257 | { |
1258 | struct perf_sched *sched = container_of(tool, struct perf_sched, tool); |
1259 | |
1260 | if (sched->tp_handler->wakeup_event) |
1261 | return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); |
1262 | |
1263 | return 0; |
1264 | } |
1265 | |
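/*
 * Map mode: print one row per context switch, with a two-character
 * shortname column per CPU and a '*' marking the CPU that switched.
 * swapper gets '.' instead of a generated shortname.
 */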
static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int cpu, this_cpu = sample->cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = machine__findnew_thread(machine, 0, next_pid);

	sched->curr_thread[this_cpu] = sched_in;

	printf(" ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			sched_in->shortname[0] = '.';
			sched_in->shortname[1] = ' ';
		} else {
			sched_in->shortname[0] = sched->next_shortname1;
			sched_in->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (sched->curr_thread[cpu])
			printf("%2s ", sched->curr_thread[cpu]->shortname);
		else
			printf("   ");
	}

	printf(" %12.6f secs ", (double)timestamp / 1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
		       sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
	} else {
		printf("\n");
	}

	return 0;
}

static int process_sched_switch_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}

static int perf_sched__process_fork_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	/* run the fork event through the perf machinery */
	perf_event__process_fork(tool, event, sample, machine);

	/* and then run additional processing needed for this command */
	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, event, machine);

	return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	evsel->hists.stats.total_period += sample->period;
	hists__inc_nr_samples(&evsel->hists, true);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}

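/*
 * Open the recorded perf.data file, bind each sched tracepoint to the
 * handler of the active mode (latency/map/replay) and process all
 * events, collecting lost-event statistics along the way.
 */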
static int perf_sched__read_events(struct perf_sched *sched,
				   struct perf_session **psession)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};

	session = perf_session__new(&file, false, &sched->tool);
	if (session == NULL) {
		pr_debug("No Memory for session\n");
		return -1;
	}

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session, &sched->tool);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->stats.nr_events[0];
		sched->nr_lost_events = session->stats.total_lost;
		sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
	}

	if (psession)
		*psession = session;
	else
		perf_session__delete(session);

	return 0;

out_delete:
	perf_session__delete(session);
	return -1;
}

static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
			sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;
	struct perf_session *session;

	setup_pager();

	/* save session -- references to threads are held in work_list */
	if (perf_sched__read_events(sched, &session))
		return -1;

	perf_sched__sort_lat(sched);

1525 | printf("\n -----------------------------------------------------------------------------------------------------------------\n"); |
1526 | printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); |
1527 | printf(" -----------------------------------------------------------------------------------------------------------------\n"); |

	next = rb_first(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / 1e6, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	perf_session__delete(session);
	return 0;
}

static int perf_sched__map(struct perf_sched *sched)
{
	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	if (perf_sched__read_events(sched, NULL))
		return -1;
	print_bad_events(sched);
	return 0;
}

static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched, NULL))
		return -1;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized:  %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}

static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(usage_msg, options);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}

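/*
 * 'perf sched record': forward to 'perf record' with the scheduler
 * tracepoints appended, plus any extra arguments from the user.
 */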
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_wakeup_new",
		"-e", "sched:sched_migrate_task",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_event__process_comm,
			.lost		 = perf_event__process_lost,
			.fork		 = perf_sched__process_fork_event,
			.ordered_samples = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
	};
	const struct option latency_options[] = {
		OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
			   "sort by key(s): runtime, switch, avg, max"),
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_INTEGER('C', "CPU", &sched.profile_cpu,
			    "CPU to profile on"),
		OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
			    "dump raw trace in ASCII"),
		OPT_END()
	};
	const struct option replay_options[] = {
		OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
			     "repeat the workload replay N times (-1: infinite)"),
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
			    "dump raw trace in ASCII"),
		OPT_END()
	};
	const struct option sched_options[] = {
		OPT_STRING('i', "input", &input_name, "file",
			   "input file name"),
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show symbol address, etc)"),
		OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
			    "dump raw trace in ASCII"),
		OPT_END()
	};
	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char *const sched_subcommands[] = { "record", "latency", "map",
						  "replay", "script", NULL };
	const char *sched_usage[] = {
		NULL,
		NULL
	};
	struct trace_sched_handler lat_ops = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
		sched.curr_pid[i] = -1;

	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	symbol__init();
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}